author     Ingo Molnar <mingo@elte.hu>  2008-12-25 07:51:46 -0500
committer  Ingo Molnar <mingo@elte.hu>  2008-12-25 07:51:46 -0500
commit     0b271ef4521756010675b1611bef20fd3096790d (patch)
tree       2c9d22a2c74122a9904e533df27f41d63ffef394 /net
parent     b19b3c74c7bbec45a848631b8f970ac110665a01 (diff)
parent     4a6908a3a050aacc9c3a2f36b276b46c0629ad91 (diff)
Merge commit 'v2.6.28' into core/core
Diffstat (limited to 'net')
-rw-r--r--  net/atm/svc.c                          6
-rw-r--r--  net/bluetooth/rfcomm/core.c            2
-rw-r--r--  net/bridge/br_netfilter.c             13
-rw-r--r--  net/can/af_can.c                      68
-rw-r--r--  net/can/bcm.c                          7
-rw-r--r--  net/core/netpoll.c                     2
-rw-r--r--  net/core/skbuff.c                      2
-rw-r--r--  net/core/sock.c                       31
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c       2
-rw-r--r--  net/ipv4/tcp_output.c                 15
-rw-r--r--  net/ipv4/tcp_vegas.c                  82
-rw-r--r--  net/ipv6/ndisc.c                       7
-rw-r--r--  net/mac80211/sta_info.c                2
-rw-r--r--  net/mac80211/wext.c                    8
-rw-r--r--  net/netfilter/nf_conntrack_core.c      2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c   7
-rw-r--r--  net/netfilter/xt_socket.c              2
-rw-r--r--  net/netlabel/netlabel_unlabeled.c     48
-rw-r--r--  net/phonet/pep-gprs.c                 27
-rw-r--r--  net/phonet/pn_dev.c                    2
-rw-r--r--  net/phonet/pn_netlink.c                3
-rw-r--r--  net/rose/af_rose.c                    10
-rw-r--r--  net/sched/sch_netem.c                  3
-rw-r--r--  net/socket.c                           1
-rw-r--r--  net/sunrpc/svcsock.c                   9
-rw-r--r--  net/unix/af_unix.c                     2
-rw-r--r--  net/unix/garbage.c                    13
-rw-r--r--  net/wireless/reg.c                     4
-rw-r--r--  net/xfrm/xfrm_policy.c                 1
29 files changed, 215 insertions, 166 deletions
diff --git a/net/atm/svc.c b/net/atm/svc.c
index de1e4f2f3a43..8fb54dc870b3 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -293,7 +293,10 @@ static int svc_listen(struct socket *sock,int backlog)
 		error = -EINVAL;
 		goto out;
 	}
-	vcc_insert_socket(sk);
+	if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
+		error = -EADDRINUSE;
+		goto out;
+	}
 	set_bit(ATM_VF_WAITING, &vcc->flags);
 	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local);
@@ -307,6 +310,7 @@ static int svc_listen(struct socket *sock,int backlog)
 		goto out;
 	}
 	set_bit(ATM_VF_LISTEN,&vcc->flags);
+	vcc_insert_socket(sk);
 	sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
 	error = -sk->sk_err;
 out:
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ba537fae0a4c..ce68e046d963 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1786,8 +1786,6 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
 	if (err < 0)
 		return;
 
-	__module_get(nsock->ops->owner);
-
 	/* Set our callbacks */
 	nsock->sk->sk_data_ready = rfcomm_l2data_ready;
 	nsock->sk->sk_state_change = rfcomm_l2state_change;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fa5cda4e552a..45f61c348e36 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -101,6 +101,18 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
 	 pppoe_proto(skb) == htons(PPP_IPV6) && \
 	 brnf_filter_pppoe_tagged)
 
+static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
+{
+}
+
+static struct dst_ops fake_dst_ops = {
+	.family = AF_INET,
+	.protocol = __constant_htons(ETH_P_IP),
+	.update_pmtu = fake_update_pmtu,
+	.entry_size = sizeof(struct rtable),
+	.entries = ATOMIC_INIT(0),
+};
+
 /*
  * Initialize bogus route table used to keep netfilter happy.
  * Currently, we fill in the PMTU entry because netfilter
@@ -117,6 +129,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	rt->u.dst.path = &rt->u.dst;
 	rt->u.dst.metrics[RTAX_MTU - 1] = 1500;
 	rt->u.dst.flags = DST_NOXFRM;
+	rt->u.dst.ops = &fake_dst_ops;
 }
 
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7d4d2b3c137e..3dadb338addd 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -319,23 +319,52 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
 	return n ? d : NULL;
 }
 
+/**
+ * find_rcv_list - determine optimal filterlist inside device filter struct
+ * @can_id: pointer to CAN identifier of a given can_filter
+ * @mask: pointer to CAN mask of a given can_filter
+ * @d: pointer to the device filter struct
+ *
+ * Description:
+ *  Returns the optimal filterlist to reduce the filter handling in the
+ *  receive path. This function is called by service functions that need
+ *  to register or unregister a can_filter in the filter lists.
+ *
+ *  A filter matches in general, when
+ *
+ *          <received_can_id> & mask == can_id & mask
+ *
+ *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describe
+ *  relevant bits for the filter.
+ *
+ *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
+ *  filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
+ *  there is a special filterlist and a special rx path filter handling.
+ *
+ * Return:
+ *  Pointer to optimal filterlist for the given can_id/mask pair.
+ *  Constistency checked mask.
+ *  Reduced can_id to have a preprocessed filter compare value.
+ */
 static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
 					struct dev_rcv_lists *d)
 {
 	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
 
-	/* filter error frames */
+	/* filter for error frames in extra filterlist */
 	if (*mask & CAN_ERR_FLAG) {
-		/* clear CAN_ERR_FLAG in list entry */
+		/* clear CAN_ERR_FLAG in filter entry */
 		*mask &= CAN_ERR_MASK;
 		return &d->rx[RX_ERR];
 	}
 
-	/* ensure valid values in can_mask */
-	if (*mask & CAN_EFF_FLAG)
-		*mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG);
-	else
-		*mask &= (CAN_SFF_MASK | CAN_RTR_FLAG);
+	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
+
+#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
+
+	/* ensure valid values in can_mask for 'SFF only' frame filtering */
+	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
+		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
 
 	/* reduce condition testing at receive time */
 	*can_id &= *mask;
@@ -348,15 +377,19 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
 	if (!(*mask))
 		return &d->rx[RX_ALL];
 
-	/* use extra filterset for the subscription of exactly *ONE* can_id */
-	if (*can_id & CAN_EFF_FLAG) {
-		if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) {
-			/* RFC: a use-case for hash-tables in the future? */
-			return &d->rx[RX_EFF];
+	/* extra filterlists for the subscription of a single non-RTR can_id */
+	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
+	    && !(*can_id & CAN_RTR_FLAG)) {
+
+		if (*can_id & CAN_EFF_FLAG) {
+			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
+				/* RFC: a future use-case for hash-tables? */
+				return &d->rx[RX_EFF];
+			}
+		} else {
+			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
+				return &d->rx_sff[*can_id];
 		}
-	} else {
-		if (*mask == CAN_SFF_MASK)
-			return &d->rx_sff[*can_id];
 	}
 
 	/* default: filter via can_id/can_mask */
@@ -589,7 +622,10 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
 		}
 	}
 
-	/* check CAN_ID specific entries */
+	/* check filterlists for single non-RTR can_ids */
+	if (can_id & CAN_RTR_FLAG)
+		return matches;
+
 	if (can_id & CAN_EFF_FLAG) {
 		hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
 			if (r->can_id == can_id) {
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d0dd382001e2..da0d426c0ce4 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -64,10 +64,11 @@
 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
 
 /* get best masking value for can_rx_register() for a given single can_id */
-#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \
-		     (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK))
+#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
+		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
+		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
 
-#define CAN_BCM_VERSION "20080415"
+#define CAN_BCM_VERSION CAN_VERSION
 static __initdata const char banner[] = KERN_INFO
 	"can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6c7af390be0a..dadac6281f20 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -133,9 +133,11 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 
 	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
+	set_bit(NAPI_STATE_NPSVC, &napi->state);
 
 	work = napi->poll(napi, budget);
 
+	clear_bit(NAPI_STATE_NPSVC, &napi->state);
 	atomic_dec(&trapped);
 	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d49ef8301b5b..65f7757465bd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -149,7 +149,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 
 void skb_truesize_bug(struct sk_buff *skb)
 {
-	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
+	WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
 	       "len=%u, sizeof(sk_buff)=%Zd\n",
 	       skb->truesize, skb->len, sizeof(struct sk_buff));
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 341e39456952..edf7220889a4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2035,9 +2035,6 @@ static inline void release_proto_idx(struct proto *prot)
 
 int proto_register(struct proto *prot, int alloc_slab)
 {
-	char *request_sock_slab_name = NULL;
-	char *timewait_sock_slab_name;
-
 	if (alloc_slab) {
 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
 					SLAB_HWCACHE_ALIGN, NULL);
@@ -2051,12 +2048,12 @@ int proto_register(struct proto *prot, int alloc_slab)
 		if (prot->rsk_prot != NULL) {
 			static const char mask[] = "request_sock_%s";
 
-			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
-			if (request_sock_slab_name == NULL)
+			prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+			if (prot->rsk_prot->slab_name == NULL)
 				goto out_free_sock_slab;
 
-			sprintf(request_sock_slab_name, mask, prot->name);
-			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
+			sprintf(prot->rsk_prot->slab_name, mask, prot->name);
+			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
 								 prot->rsk_prot->obj_size, 0,
 								 SLAB_HWCACHE_ALIGN, NULL);
 
@@ -2070,14 +2067,14 @@ int proto_register(struct proto *prot, int alloc_slab)
 		if (prot->twsk_prot != NULL) {
 			static const char mask[] = "tw_sock_%s";
 
-			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+			prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
 
-			if (timewait_sock_slab_name == NULL)
+			if (prot->twsk_prot->twsk_slab_name == NULL)
 				goto out_free_request_sock_slab;
 
-			sprintf(timewait_sock_slab_name, mask, prot->name);
+			sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
 			prot->twsk_prot->twsk_slab =
-				kmem_cache_create(timewait_sock_slab_name,
+				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
 						  prot->twsk_prot->twsk_obj_size,
 						  0, SLAB_HWCACHE_ALIGN,
 						  NULL);
@@ -2093,14 +2090,14 @@ int proto_register(struct proto *prot, int alloc_slab)
 	return 0;
 
 out_free_timewait_sock_slab_name:
-	kfree(timewait_sock_slab_name);
+	kfree(prot->twsk_prot->twsk_slab_name);
 out_free_request_sock_slab:
 	if (prot->rsk_prot && prot->rsk_prot->slab) {
 		kmem_cache_destroy(prot->rsk_prot->slab);
 		prot->rsk_prot->slab = NULL;
 	}
 out_free_request_sock_slab_name:
-	kfree(request_sock_slab_name);
+	kfree(prot->rsk_prot->slab_name);
 out_free_sock_slab:
 	kmem_cache_destroy(prot->slab);
 	prot->slab = NULL;
@@ -2123,18 +2120,14 @@ void proto_unregister(struct proto *prot)
 	}
 
 	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
-		const char *name = kmem_cache_name(prot->rsk_prot->slab);
-
 		kmem_cache_destroy(prot->rsk_prot->slab);
-		kfree(name);
+		kfree(prot->rsk_prot->slab_name);
 		prot->rsk_prot->slab = NULL;
 	}
 
 	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
-		const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
-
 		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
-		kfree(name);
+		kfree(prot->twsk_prot->twsk_slab_name);
 		prot->twsk_prot->twsk_slab = NULL;
 	}
 }
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index bea54a685109..8d489e746b21 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -61,7 +61,7 @@ static struct
 static struct xt_table nat_table = {
 	.name = "nat",
 	.valid_hooks = NAT_VALID_HOOKS,
-	.lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
+	.lock = __RW_LOCK_UNLOCKED(nat_table.lock),
 	.me = THIS_MODULE,
 	.af = AF_INET,
 };
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ba85d8831893..fe3b4bdfd251 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1028,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 
 /* Compute the current effective MSS, taking SACKs and IP options,
  * and even PMTU discovery events into account.
- *
- * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
- * cannot be large. However, taking into account rare use of URG, this
- * is not a big flaw.
  */
 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 {
@@ -1046,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 	mss_now = tp->mss_cache;
 
-	if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp))
+	if (large_allowed && sk_can_gso(sk))
 		doing_tso = 1;
 
 	if (dst) {
@@ -1516,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk)
  * send_head. This happens as incoming acks open up the remote
  * window for us.
  *
+ * LARGESEND note: !tcp_urg_mode is overkill, only frames between
+ * snd_up-64k-mss .. snd_up cannot be large. However, taking into
+ * account rare use of URG, this is not a big flaw.
+ *
  * Returns 1, if no segments are in flight and we have queued segments, but
  * cannot send anything now because of SWS or another problem.
  */
@@ -1567,7 +1567,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 		}
 
 		limit = mss_now;
-		if (tso_segs > 1)
+		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
 						    cwnd_quota);
 
@@ -1616,6 +1616,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
  */
 void tcp_push_one(struct sock *sk, unsigned int mss_now)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 	unsigned int tso_segs, cwnd_quota;
 
@@ -1630,7 +1631,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 	BUG_ON(!tso_segs);
 
 	limit = mss_now;
-	if (tso_segs > 1)
+	if (tso_segs > 1 && !tcp_urg_mode(tp))
 		limit = tcp_mss_split_point(sk, skb, mss_now,
 					    cwnd_quota);
 
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 14504dada116..a453aac91bd3 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -40,18 +40,14 @@
 
 #include "tcp_vegas.h"
 
-/* Default values of the Vegas variables, in fixed-point representation
- * with V_PARAM_SHIFT bits to the right of the binary point.
- */
-#define V_PARAM_SHIFT 1
-static int alpha = 2<<V_PARAM_SHIFT;
-static int beta = 4<<V_PARAM_SHIFT;
-static int gamma = 1<<V_PARAM_SHIFT;
+static int alpha = 2;
+static int beta = 4;
+static int gamma = 1;
 
 module_param(alpha, int, 0644);
-MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(alpha, "lower bound of packets in network");
 module_param(beta, int, 0644);
-MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(beta, "upper bound of packets in network");
 module_param(gamma, int, 0644);
 MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
 
@@ -172,49 +168,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		return;
 	}
 
-	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
-	 *
-	 * These are so named because they represent the approximate values
-	 * of snd_una and snd_nxt at the beginning of the current RTT. More
-	 * precisely, they represent the amount of data sent during the RTT.
-	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
-	 * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
-	 * bytes of data have been ACKed during the course of the RTT, giving
-	 * an "actual" rate of:
-	 *
-	 *     (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
-	 *
-	 * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
-	 * because delayed ACKs can cover more than one segment, so they
-	 * don't line up nicely with the boundaries of RTTs.
-	 *
-	 * Another unfortunate fact of life is that delayed ACKs delay the
-	 * advance of the left edge of our send window, so that the number
-	 * of bytes we send in an RTT is often less than our cwnd will allow.
-	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
-	 */
-
 	if (after(ack, vegas->beg_snd_nxt)) {
 		/* Do the Vegas once-per-RTT cwnd adjustment. */
-		u32 old_wnd, old_snd_cwnd;
-
-
-		/* Here old_wnd is essentially the window of data that was
-		 * sent during the previous RTT, and has all
-		 * been acknowledged in the course of the RTT that ended
-		 * with the ACK we just received. Likewise, old_snd_cwnd
-		 * is the cwnd during the previous RTT.
-		 */
-		old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
-			tp->mss_cache;
-		old_snd_cwnd = vegas->beg_snd_cwnd;
 
 		/* Save the extent of the current window so we can use this
 		 * at the end of the next RTT.
 		 */
-		vegas->beg_snd_una  = vegas->beg_snd_nxt;
 		vegas->beg_snd_nxt  = tp->snd_nxt;
-		vegas->beg_snd_cwnd = tp->snd_cwnd;
 
 		/* We do the Vegas calculations only if we got enough RTT
 		 * samples that we can be reasonably sure that we got
@@ -252,22 +212,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 			 *
 			 * This is:
 			 *     (actual rate in segments) * baseRTT
-			 * We keep it as a fixed point number with
-			 * V_PARAM_SHIFT bits to the right of the binary point.
 			 */
-			target_cwnd = ((u64)old_wnd * vegas->baseRTT);
-			target_cwnd <<= V_PARAM_SHIFT;
-			do_div(target_cwnd, rtt);
+			target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
 
 			/* Calculate the difference between the window we had,
 			 * and the window we would like to have. This quantity
 			 * is the "Diff" from the Arizona Vegas papers.
-			 *
-			 * Again, this is a fixed point number with
-			 * V_PARAM_SHIFT bits to the right of the binary
-			 * point.
 			 */
-			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
+			diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
 			if (diff > gamma && tp->snd_ssthresh > 2 ) {
 				/* Going too fast. Time to slow down
@@ -282,16 +234,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 				 * truncation robs us of full link
 				 * utilization.
 				 */
-				tp->snd_cwnd = min(tp->snd_cwnd,
-						   ((u32)target_cwnd >>
-						    V_PARAM_SHIFT)+1);
+				tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
 
 			} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
 				/* Slow start. */
 				tcp_slow_start(tp);
 			} else {
 				/* Congestion avoidance. */
-				u32 next_snd_cwnd;
 
 				/* Figure out where we would like cwnd
 				 * to be.
@@ -300,32 +249,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 					/* The old window was too fast, so
 					 * we slow down.
 					 */
-					next_snd_cwnd = old_snd_cwnd - 1;
+					tp->snd_cwnd--;
 				} else if (diff < alpha) {
 					/* We don't have enough extra packets
 					 * in the network, so speed up.
 					 */
-					next_snd_cwnd = old_snd_cwnd + 1;
+					tp->snd_cwnd++;
 				} else {
 					/* Sending just as fast as we
 					 * should be.
 					 */
-					next_snd_cwnd = old_snd_cwnd;
 				}
-
-				/* Adjust cwnd upward or downward, toward the
-				 * desired value.
-				 */
-				if (next_snd_cwnd > tp->snd_cwnd)
-					tp->snd_cwnd++;
-				else if (next_snd_cwnd < tp->snd_cwnd)
-					tp->snd_cwnd--;
 			}
 
 			if (tp->snd_cwnd < 2)
 				tp->snd_cwnd = 2;
 			else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
 				tp->snd_cwnd = tp->snd_cwnd_clamp;
+
+			tp->snd_ssthresh = tcp_current_ssthresh(sk);
 		}
 
 		/* Wipe the slate clean for the next RTT. */
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 172438320eec..d0f54d18e19b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -912,8 +912,13 @@ static void ndisc_recv_na(struct sk_buff *skb)
 			   is invalid, but ndisc specs say nothing
 			   about it. It could be misconfiguration, or
 			   an smart proxy agent tries to help us :-)
+
+			   We should not print the error if NA has been
+			   received from loopback - it is just our own
+			   unsolicited advertisement.
 			 */
-			ND_PRINTK1(KERN_WARNING
+			if (skb->pkt_type != PACKET_LOOPBACK)
+				ND_PRINTK1(KERN_WARNING
 				   "ICMPv6 NA: someone advertises our address on %s!\n",
 				   ifp->idev->dev->name);
 			in6_ifa_put(ifp);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7fef8ea1f5ec..d254446b85b5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -99,7 +99,7 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
 
 	sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
 	while (sta) {
-		if (compare_ether_addr(sta->sta.addr, addr) == 0)
+		if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference(sta->hnext);
 	}
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 742f811ca416..ab4ddba874be 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -271,6 +271,7 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev,
 				  __u32 *mode, char *extra)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	int type;
 
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -281,6 +282,13 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev,
 		type = NL80211_IFTYPE_STATION;
 		break;
 	case IW_MODE_ADHOC:
+		/* Setting ad-hoc mode on non ibss channel is not
+		 * supported.
+		 */
+		if (local->oper_channel &&
+		    (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS))
+			return -EOPNOTSUPP;
+
 		type = NL80211_IFTYPE_ADHOC;
 		break;
 	case IW_MODE_REPEAT:
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 622d7c671cb7..233fdd2d7d21 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -305,9 +305,7 @@ void nf_conntrack_hash_insert(struct nf_conn *ct)
 	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
-	spin_lock_bh(&nf_conntrack_lock);
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
-	spin_unlock_bh(&nf_conntrack_lock);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index a040d46f85d6..5f4a6516b3b6 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1090,7 +1090,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
 	struct nf_conn_help *help;
 	struct nf_conntrack_helper *helper;
 
-	ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_KERNEL);
+	ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
 	if (ct == NULL || IS_ERR(ct))
 		return -ENOMEM;
 
@@ -1138,7 +1138,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
 		}
 	}
 
-	nf_ct_acct_ext_add(ct, GFP_KERNEL);
+	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
 
 #if defined(CONFIG_NF_CONNTRACK_MARK)
 	if (cda[CTA_MARK])
@@ -1212,13 +1212,14 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 			atomic_inc(&master_ct->ct_general.use);
 		}
 
-		spin_unlock_bh(&nf_conntrack_lock);
 		err = -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_CREATE)
 			err = ctnetlink_create_conntrack(cda,
 							 &otuple,
 							 &rtuple,
 							 master_ct);
+		spin_unlock_bh(&nf_conntrack_lock);
+
 		if (err < 0 && master_ct)
 			nf_ct_put(master_ct);
 
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 02a8fed21082..1acc089be7e9 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -141,7 +141,7 @@ socket_mt(const struct sk_buff *skb, const struct xt_match_param *par)
 	sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
 				   saddr, daddr, sport, dport, par->in, false);
 	if (sk != NULL) {
-		bool wildcard = (inet_sk(sk)->rcv_saddr == 0);
+		bool wildcard = (sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->rcv_saddr == 0);
 
 		nf_tproxy_put_sock(sk);
 		if (wildcard)
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e8a5c32b0f10..8c0308032178 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -562,7 +562,6 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
 				      const struct in_addr *mask,
 				      struct netlbl_audit *audit_info)
 {
-	int ret_val = 0;
 	struct netlbl_af4list *list_entry;
 	struct netlbl_unlhsh_addr4 *entry;
 	struct audit_buffer *audit_buf;
@@ -574,9 +573,10 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
 	list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
 					   &iface->addr4_list);
 	spin_unlock(&netlbl_unlhsh_lock);
-	if (list_entry == NULL)
-		ret_val = -ENOENT;
-	entry = netlbl_unlhsh_addr4_entry(list_entry);
+	if (list_entry != NULL)
+		entry = netlbl_unlhsh_addr4_entry(list_entry);
+	else
+		entry = NULL;
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
 					      audit_info);
@@ -587,19 +587,21 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
 					    addr->s_addr, mask->s_addr);
 		if (dev != NULL)
 			dev_put(dev);
-		if (entry && security_secid_to_secctx(entry->secid,
-						      &secctx,
-						      &secctx_len) == 0) {
+		if (entry != NULL &&
+		    security_secid_to_secctx(entry->secid,
+					     &secctx, &secctx_len) == 0) {
 			audit_log_format(audit_buf, " sec_obj=%s", secctx);
 			security_release_secctx(secctx, secctx_len);
 		}
-		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+		audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
 		audit_log_end(audit_buf);
 	}
 
-	if (ret_val == 0)
-		call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
-	return ret_val;
+	if (entry == NULL)
+		return -ENOENT;
+
+	call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
+	return 0;
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -623,7 +625,6 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 				      const struct in6_addr *mask,
 				      struct netlbl_audit *audit_info)
 {
-	int ret_val = 0;
 	struct netlbl_af6list *list_entry;
 	struct netlbl_unlhsh_addr6 *entry;
 	struct audit_buffer *audit_buf;
@@ -634,9 +635,10 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 	spin_lock(&netlbl_unlhsh_lock);
 	list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list);
 	spin_unlock(&netlbl_unlhsh_lock);
-	if (list_entry == NULL)
-		ret_val = -ENOENT;
-	entry = netlbl_unlhsh_addr6_entry(list_entry);
+	if (list_entry != NULL)
+		entry = netlbl_unlhsh_addr6_entry(list_entry);
+	else
+		entry = NULL;
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
 					      audit_info);
@@ -647,19 +649,21 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 					    addr, mask);
 		if (dev != NULL)
 			dev_put(dev);
-		if (entry && security_secid_to_secctx(entry->secid,
-						      &secctx,
-						      &secctx_len) == 0) {
+		if (entry != NULL &&
+		    security_secid_to_secctx(entry->secid,
+					     &secctx, &secctx_len) == 0) {
 			audit_log_format(audit_buf, " sec_obj=%s", secctx);
 			security_release_secctx(secctx, secctx_len);
 		}
-		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+		audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
 		audit_log_end(audit_buf);
 	}
 
-	if (ret_val == 0)
-		call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
-	return ret_val;
+	if (entry == NULL)
+		return -ENOENT;
+
+	call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
+	return 0;
 }
 #endif /* IPv6 */
 
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 9978afbd9f2a..803eeef0aa85 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -155,12 +155,13 @@ static void gprs_data_ready(struct sock *sk, int len)
 static void gprs_write_space(struct sock *sk)
 {
 	struct gprs_dev *dev = sk->sk_user_data;
+	struct net_device *net = dev->net;
 	unsigned credits = pep_writeable(sk);
 
 	spin_lock_bh(&dev->tx_lock);
 	dev->tx_max = credits;
-	if (credits > skb_queue_len(&dev->tx_queue))
-		netif_wake_queue(dev->net);
+	if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net))
+		netif_wake_queue(net);
 	spin_unlock_bh(&dev->tx_lock);
 }
 
@@ -168,6 +169,23 @@ static void gprs_write_space(struct sock *sk)
  * Network device callbacks
  */
 
+static int gprs_open(struct net_device *dev)
+{
+	struct gprs_dev *gp = netdev_priv(dev);
+
+	gprs_write_space(gp->sk);
+	return 0;
+}
+
+static int gprs_close(struct net_device *dev)
+{
+	struct gprs_dev *gp = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	flush_work(&gp->tx_work);
+	return 0;
+}
+
 static int gprs_xmit(struct sk_buff *skb, struct net_device *net)
 {
 	struct gprs_dev *dev = netdev_priv(net);
@@ -254,6 +272,8 @@ static void gprs_setup(struct net_device *net)
 	net->tx_queue_len = 10;
 
 	net->destructor = free_netdev;
+	net->open = gprs_open;
+	net->stop = gprs_close;
 	net->hard_start_xmit = gprs_xmit; /* mandatory */
 	net->change_mtu = gprs_set_mtu;
 	net->get_stats = gprs_get_stats;
@@ -318,7 +338,6 @@ int gprs_attach(struct sock *sk)
 	dev->sk = sk;
 
 	printk(KERN_DEBUG"%s: attached\n", net->name);
-	gprs_write_space(sk); /* kick off TX */
 	return net->ifindex;
 
 out_rel:
@@ -341,7 +360,5 @@ void gprs_detach(struct sock *sk)
 
 	printk(KERN_DEBUG"%s: detached\n", net->name);
 	unregister_netdev(net);
-	flush_scheduled_work();
 	sock_put(sk);
-	skb_queue_purge(&dev->tx_queue);
 }
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 53be9fc82aaa..f93ff8ef47d0 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -115,7 +115,7 @@ int phonet_address_del(struct net_device *dev, u8 addr)
 	pnd = __phonet_get(dev);
 	if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
 		err = -EADDRNOTAVAIL;
-	if (bitmap_empty(pnd->addrs, 64))
+	else if (bitmap_empty(pnd->addrs, 64))
 		__phonet_device_free(pnd);
 	spin_unlock_bh(&pndevs.lock);
 	return err;
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index b1770d66bc8d..242fe8f8c322 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -123,6 +123,7 @@ nla_put_failure:
 
 static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct net *net = sock_net(skb->sk);
 	struct phonet_device *pnd;
 	int dev_idx = 0, dev_start_idx = cb->args[0];
 	int addr_idx = 0, addr_start_idx = cb->args[1];
@@ -131,6 +132,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 	list_for_each_entry(pnd, &pndevs.list, list) {
 		u8 addr;
 
+		if (!net_eq(dev_net(pnd->netdev), net))
+			continue;
 		if (dev_idx > dev_start_idx)
 			addr_start_idx = 0;
 		if (dev_idx++ < dev_start_idx)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index a7f1ce11bc22..0c1cc7612800 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1072,6 +1072,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned char *asmptr;
 	int n, size, qbit = 0;
 
+	/* ROSE empty frame has no meaning : don't send */
+	if (len == 0)
+		return 0;
+
 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
 		return -EINVAL;
 
@@ -1265,6 +1269,12 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_reset_transport_header(skb);
 	copied = skb->len;
 
+	/* ROSE empty frame has no meaning : ignore it */
+	if (copied == 0) {
+		skb_free_datagram(sk, skb);
+		return copied;
+	}
+
 	if (copied > size) {
 		copied = size;
 		msg->msg_flags |= MSG_TRUNC;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a11959908d9a..98402f0efa47 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -46,9 +46,6 @@
 	 layering other disciplines. It does not need to do bandwidth
 	 control either since that can be handled by using token
 	 bucket or other rate control.
-
-	 The simulator is limited by the Linux timer resolution
-	 and will create packet bursts on the HZ boundary (1ms).
 */
 
 struct netem_sched_data {
diff --git a/net/socket.c b/net/socket.c
index 92764d836891..76ba80aeac1a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2307,6 +2307,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
 	}
 
 	(*newsock)->ops = sock->ops;
+	__module_get((*newsock)->ops->owner);
 
 done:
 	return err;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 95293f549e9c..a1951dcc5776 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1183,7 +1183,11 @@ int svc_addsock(struct svc_serv *serv,
 	else if (so->state > SS_UNCONNECTED)
 		err = -EISCONN;
 	else {
-		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
+		if (!try_module_get(THIS_MODULE))
+			err = -ENOENT;
+		else
+			svsk = svc_setup_socket(serv, so, &err,
+						SVC_SOCK_DEFAULTS);
 		if (svsk) {
 			struct sockaddr_storage addr;
 			struct sockaddr *sin = (struct sockaddr *)&addr;
@@ -1196,7 +1200,8 @@ int svc_addsock(struct svc_serv *serv,
 			spin_unlock_bh(&serv->sv_lock);
 			svc_xprt_received(&svsk->sk_xprt);
 			err = 0;
-		}
+		} else
+			module_put(THIS_MODULE);
 	}
 	if (err) {
 		sockfd_put(so);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index eb90f77bb0e2..66d5ac4773ab 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1343,6 +1343,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
+	wait_for_unix_gc();
 	err = scm_send(sock, msg, siocb->scm);
 	if (err < 0)
 		return err;
@@ -1493,6 +1494,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
+	wait_for_unix_gc();
 	err = scm_send(sock, msg, siocb->scm);
 	if (err < 0)
 		return err;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6d4a9a8de5ef..abb3ab34cb1e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -80,6 +80,7 @@
 #include <linux/file.h>
 #include <linux/proc_fs.h>
 #include <linux/mutex.h>
+#include <linux/wait.h>
 
 #include <net/sock.h>
 #include <net/af_unix.h>
@@ -91,6 +92,7 @@
 static LIST_HEAD(gc_inflight_list);
 static LIST_HEAD(gc_candidates);
 static DEFINE_SPINLOCK(unix_gc_lock);
+static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
 
 unsigned int unix_tot_inflight;
 
@@ -266,12 +268,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
 	list_move_tail(&u->link, &gc_candidates);
 }
 
-/* The external entry point: unix_gc() */
+static bool gc_in_progress = false;
 
-void unix_gc(void)
+void wait_for_unix_gc(void)
 {
-	static bool gc_in_progress = false;
+	wait_event(unix_gc_wait, gc_in_progress == false);
+}
 
+/* The external entry point: unix_gc() */
+void unix_gc(void)
+{
 	struct unix_sock *u;
 	struct unix_sock *next;
 	struct sk_buff_head hitlist;
@@ -376,6 +382,7 @@ void unix_gc(void)
 	/* All candidates should have been detached by now. */
 	BUG_ON(!list_empty(&gc_candidates));
 	gc_in_progress = false;
+	wake_up(&unix_gc_wait);
 
  out:
 	spin_unlock(&unix_gc_lock);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 626dbb688499..eb3b1a9f9b12 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -343,9 +343,9 @@ static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by,
 			return 0;
 		return -EALREADY;
 	}
-	if (WARN_ON(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2)),
+	if (WARN(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2),
 		  "Invalid Country IE regulatory hint passed "
-		  "to the wireless core\n")
+		  "to the wireless core\n"))
 		return -EINVAL;
 	/* We ignore Country IE hints for now, as we haven't yet
 	 * added the dot11MultiDomainCapabilityEnabled flag
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 058f04f54b90..fb216c9adf86 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -817,6 +817,7 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
 				continue;
 			hlist_del(&pol->bydst);
 			hlist_del(&pol->byidx);
+			list_del(&pol->walk.all);
 			write_unlock_bh(&xfrm_policy_lock);
 
 			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,