Diffstat (limited to 'net')
-rw-r--r--  net/atm/svc.c                         6
-rw-r--r--  net/can/af_can.c                     68
-rw-r--r--  net/can/bcm.c                         7
-rw-r--r--  net/core/netpoll.c                    2
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c      2
-rw-r--r--  net/ipv4/tcp_output.c                22
-rw-r--r--  net/ipv4/tcp_vegas.c                 82
-rw-r--r--  net/ipv6/ndisc.c                      7
-rw-r--r--  net/mac80211/sta_info.c               2
-rw-r--r--  net/netfilter/xt_socket.c             2
-rw-r--r--  net/netlabel/netlabel_unlabeled.c    48
-rw-r--r--  net/phonet/pep-gprs.c                27
-rw-r--r--  net/phonet/pn_netlink.c               3
-rw-r--r--  net/sched/sch_netem.c                 3
-rw-r--r--  net/xfrm/xfrm_policy.c                1
15 files changed, 146 insertions(+), 136 deletions(-)
diff --git a/net/atm/svc.c b/net/atm/svc.c
index de1e4f2f3a43..8fb54dc870b3 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -293,7 +293,10 @@ static int svc_listen(struct socket *sock,int backlog) | |||
293 | error = -EINVAL; | 293 | error = -EINVAL; |
294 | goto out; | 294 | goto out; |
295 | } | 295 | } |
296 | vcc_insert_socket(sk); | 296 | if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { |
297 | error = -EADDRINUSE; | ||
298 | goto out; | ||
299 | } | ||
297 | set_bit(ATM_VF_WAITING, &vcc->flags); | 300 | set_bit(ATM_VF_WAITING, &vcc->flags); |
298 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); | 301 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); |
299 | sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); | 302 | sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); |
@@ -307,6 +310,7 @@ static int svc_listen(struct socket *sock,int backlog) | |||
307 | goto out; | 310 | goto out; |
308 | } | 311 | } |
309 | set_bit(ATM_VF_LISTEN,&vcc->flags); | 312 | set_bit(ATM_VF_LISTEN,&vcc->flags); |
313 | vcc_insert_socket(sk); | ||
310 | sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; | 314 | sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; |
311 | error = -sk->sk_err; | 315 | error = -sk->sk_err; |
312 | out: | 316 | out: |
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7d4d2b3c137e..3dadb338addd 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -319,23 +319,52 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) | |||
319 | return n ? d : NULL; | 319 | return n ? d : NULL; |
320 | } | 320 | } |
321 | 321 | ||
322 | /** | ||
323 | * find_rcv_list - determine optimal filterlist inside device filter struct | ||
324 | * @can_id: pointer to CAN identifier of a given can_filter | ||
325 | * @mask: pointer to CAN mask of a given can_filter | ||
326 | * @d: pointer to the device filter struct | ||
327 | * | ||
328 | * Description: | ||
329 | * Returns the optimal filterlist to reduce the filter handling in the | ||
330 | * receive path. This function is called by service functions that need | ||
331 | * to register or unregister a can_filter in the filter lists. | ||
332 | * | ||
333 | * A filter matches in general, when | ||
334 | * | ||
335 | * <received_can_id> & mask == can_id & mask | ||
336 | * | ||
337 | * so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes | ||
338 | * relevant bits for the filter. | ||
339 | * | ||
340 | * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can | ||
341 | * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames | ||
342 | * there is a special filterlist and a special rx path filter handling. | ||
343 | * | ||
344 | * Return: | ||
345 | * Pointer to optimal filterlist for the given can_id/mask pair. | ||
346 | * Consistency checked mask. | ||
347 | * Reduced can_id to have a preprocessed filter compare value. | ||
348 | */ | ||
322 | static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, | 349 | static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, |
323 | struct dev_rcv_lists *d) | 350 | struct dev_rcv_lists *d) |
324 | { | 351 | { |
325 | canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ | 352 | canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ |
326 | 353 | ||
327 | /* filter error frames */ | 354 | /* filter for error frames in extra filterlist */ |
328 | if (*mask & CAN_ERR_FLAG) { | 355 | if (*mask & CAN_ERR_FLAG) { |
329 | /* clear CAN_ERR_FLAG in list entry */ | 356 | /* clear CAN_ERR_FLAG in filter entry */ |
330 | *mask &= CAN_ERR_MASK; | 357 | *mask &= CAN_ERR_MASK; |
331 | return &d->rx[RX_ERR]; | 358 | return &d->rx[RX_ERR]; |
332 | } | 359 | } |
333 | 360 | ||
334 | /* ensure valid values in can_mask */ | 361 | /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */ |
335 | if (*mask & CAN_EFF_FLAG) | 362 | |
336 | *mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG); | 363 | #define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG) |
337 | else | 364 | |
338 | *mask &= (CAN_SFF_MASK | CAN_RTR_FLAG); | 365 | /* ensure valid values in can_mask for 'SFF only' frame filtering */ |
366 | if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG)) | ||
367 | *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS); | ||
339 | 368 | ||
340 | /* reduce condition testing at receive time */ | 369 | /* reduce condition testing at receive time */ |
341 | *can_id &= *mask; | 370 | *can_id &= *mask; |
@@ -348,15 +377,19 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, | |||
348 | if (!(*mask)) | 377 | if (!(*mask)) |
349 | return &d->rx[RX_ALL]; | 378 | return &d->rx[RX_ALL]; |
350 | 379 | ||
351 | /* use extra filterset for the subscription of exactly *ONE* can_id */ | 380 | /* extra filterlists for the subscription of a single non-RTR can_id */ |
352 | if (*can_id & CAN_EFF_FLAG) { | 381 | if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) |
353 | if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) { | 382 | && !(*can_id & CAN_RTR_FLAG)) { |
354 | /* RFC: a use-case for hash-tables in the future? */ | 383 | |
355 | return &d->rx[RX_EFF]; | 384 | if (*can_id & CAN_EFF_FLAG) { |
385 | if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) { | ||
386 | /* RFC: a future use-case for hash-tables? */ | ||
387 | return &d->rx[RX_EFF]; | ||
388 | } | ||
389 | } else { | ||
390 | if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS)) | ||
391 | return &d->rx_sff[*can_id]; | ||
356 | } | 392 | } |
357 | } else { | ||
358 | if (*mask == CAN_SFF_MASK) | ||
359 | return &d->rx_sff[*can_id]; | ||
360 | } | 393 | } |
361 | 394 | ||
362 | /* default: filter via can_id/can_mask */ | 395 | /* default: filter via can_id/can_mask */ |
@@ -589,7 +622,10 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) | |||
589 | } | 622 | } |
590 | } | 623 | } |
591 | 624 | ||
592 | /* check CAN_ID specific entries */ | 625 | /* check filterlists for single non-RTR can_ids */ |
626 | if (can_id & CAN_RTR_FLAG) | ||
627 | return matches; | ||
628 | |||
593 | if (can_id & CAN_EFF_FLAG) { | 629 | if (can_id & CAN_EFF_FLAG) { |
594 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { | 630 | hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { |
595 | if (r->can_id == can_id) { | 631 | if (r->can_id == can_id) { |
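(Illustration, not part of the patch.) The kernel-doc comment added to find_rcv_list() above states the basic matching rule: a received frame passes a filter when (received_can_id & mask) == (can_id & mask), with CAN_INV_FILTER inverting the result. Below is a minimal user-space sketch of that rule, assuming only the standard <linux/can.h> definitions; can_filter_match() is a hypothetical helper for illustration, not a kernel API.

/*
 * Sketch of the matching rule from the find_rcv_list() comment:
 * a frame is accepted when (rx_id & mask) == (can_id & mask); the
 * CAN_INV_FILTER bit in can_id inverts the result.
 */
#include <stdbool.h>
#include <stdio.h>
#include <linux/can.h>

static bool can_filter_match(canid_t rx_id, const struct can_filter *f)
{
	canid_t id  = f->can_id & ~CAN_INV_FILTER;	/* drop the flag before comparing */
	bool    hit = (rx_id & f->can_mask) == (id & f->can_mask);

	return (f->can_id & CAN_INV_FILTER) ? !hit : hit;
}

int main(void)
{
	/* subscribe to the single SFF id 0x123, data frames only */
	struct can_filter f = {
		.can_id   = 0x123,
		.can_mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
	};

	printf("0x123 data frame: %d\n", can_filter_match(0x123, &f));			/* 1 */
	printf("0x123 RTR frame : %d\n", can_filter_match(0x123 | CAN_RTR_FLAG, &f));	/* 0 */
	printf("0x124 data frame: %d\n", can_filter_match(0x124, &f));			/* 0 */
	return 0;
}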
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d0dd382001e2..da0d426c0ce4 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -64,10 +64,11 @@ | |||
64 | #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */ | 64 | #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */ |
65 | 65 | ||
66 | /* get best masking value for can_rx_register() for a given single can_id */ | 66 | /* get best masking value for can_rx_register() for a given single can_id */ |
67 | #define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \ | 67 | #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \ |
68 | (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK)) | 68 | (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ |
69 | (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) | ||
69 | 70 | ||
70 | #define CAN_BCM_VERSION "20080415" | 71 | #define CAN_BCM_VERSION CAN_VERSION |
71 | static __initdata const char banner[] = KERN_INFO | 72 | static __initdata const char banner[] = KERN_INFO |
72 | "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n"; | 73 | "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n"; |
73 | 74 | ||
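(Illustration, not part of the patch.) The REGMASK() change above makes the registration mask for a single can_id always cover the EFF and RTR flag bits, so a subscription to one identifier no longer matches the corresponding RTR frame. A stand-alone sketch that reproduces the new macro and prints the masks it yields, assuming the standard <linux/can.h> constants:

/*
 * Illustration of the new REGMASK() result (macro body copied from the
 * hunk above).  For any single can_id the mask now also covers the EFF
 * and RTR flag bits, so only the exact frame type subscribed to can match.
 */
#include <stdio.h>
#include <linux/can.h>

#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

int main(void)
{
	canid_t sff = 0x123;				/* 11-bit identifier */
	canid_t eff = 0x12345678 | CAN_EFF_FLAG;	/* 29-bit identifier */

	printf("REGMASK(sff) = 0x%08x\n", REGMASK(sff));	/* 0xc00007ff */
	printf("REGMASK(eff) = 0x%08x\n", REGMASK(eff));	/* 0xdfffffff */
	return 0;
}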
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6c7af390be0a..dadac6281f20 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -133,9 +133,11 @@ static int poll_one_napi(struct netpoll_info *npinfo, | |||
133 | 133 | ||
134 | npinfo->rx_flags |= NETPOLL_RX_DROP; | 134 | npinfo->rx_flags |= NETPOLL_RX_DROP; |
135 | atomic_inc(&trapped); | 135 | atomic_inc(&trapped); |
136 | set_bit(NAPI_STATE_NPSVC, &napi->state); | ||
136 | 137 | ||
137 | work = napi->poll(napi, budget); | 138 | work = napi->poll(napi, budget); |
138 | 139 | ||
140 | clear_bit(NAPI_STATE_NPSVC, &napi->state); | ||
139 | atomic_dec(&trapped); | 141 | atomic_dec(&trapped); |
140 | npinfo->rx_flags &= ~NETPOLL_RX_DROP; | 142 | npinfo->rx_flags &= ~NETPOLL_RX_DROP; |
141 | 143 | ||
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index bea54a685109..8d489e746b21 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -61,7 +61,7 @@ static struct | |||
61 | static struct xt_table nat_table = { | 61 | static struct xt_table nat_table = { |
62 | .name = "nat", | 62 | .name = "nat", |
63 | .valid_hooks = NAT_VALID_HOOKS, | 63 | .valid_hooks = NAT_VALID_HOOKS, |
64 | .lock = __RW_LOCK_UNLOCKED(__nat_table.lock), | 64 | .lock = __RW_LOCK_UNLOCKED(nat_table.lock), |
65 | .me = THIS_MODULE, | 65 | .me = THIS_MODULE, |
66 | .af = AF_INET, | 66 | .af = AF_INET, |
67 | }; | 67 | }; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 85b07eba1879..fe3b4bdfd251 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -722,8 +722,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) | |||
722 | static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, | 722 | static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, |
723 | unsigned int mss_now) | 723 | unsigned int mss_now) |
724 | { | 724 | { |
725 | if (skb->len <= mss_now || !sk_can_gso(sk) || | 725 | if (skb->len <= mss_now || !sk_can_gso(sk)) { |
726 | tcp_urg_mode(tcp_sk(sk))) { | ||
727 | /* Avoid the costly divide in the normal | 726 | /* Avoid the costly divide in the normal |
728 | * non-TSO case. | 727 | * non-TSO case. |
729 | */ | 728 | */ |
@@ -1029,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) | |||
1029 | 1028 | ||
1030 | /* Compute the current effective MSS, taking SACKs and IP options, | 1029 | /* Compute the current effective MSS, taking SACKs and IP options, |
1031 | * and even PMTU discovery events into account. | 1030 | * and even PMTU discovery events into account. |
1032 | * | ||
1033 | * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up | ||
1034 | * cannot be large. However, taking into account rare use of URG, this | ||
1035 | * is not a big flaw. | ||
1036 | */ | 1031 | */ |
1037 | unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | 1032 | unsigned int tcp_current_mss(struct sock *sk, int large_allowed) |
1038 | { | 1033 | { |
@@ -1047,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
1047 | 1042 | ||
1048 | mss_now = tp->mss_cache; | 1043 | mss_now = tp->mss_cache; |
1049 | 1044 | ||
1050 | if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp)) | 1045 | if (large_allowed && sk_can_gso(sk)) |
1051 | doing_tso = 1; | 1046 | doing_tso = 1; |
1052 | 1047 | ||
1053 | if (dst) { | 1048 | if (dst) { |
@@ -1164,9 +1159,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, | |||
1164 | { | 1159 | { |
1165 | int tso_segs = tcp_skb_pcount(skb); | 1160 | int tso_segs = tcp_skb_pcount(skb); |
1166 | 1161 | ||
1167 | if (!tso_segs || | 1162 | if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { |
1168 | (tso_segs > 1 && (tcp_skb_mss(skb) != mss_now || | ||
1169 | tcp_urg_mode(tcp_sk(sk))))) { | ||
1170 | tcp_set_skb_tso_segs(sk, skb, mss_now); | 1163 | tcp_set_skb_tso_segs(sk, skb, mss_now); |
1171 | tso_segs = tcp_skb_pcount(skb); | 1164 | tso_segs = tcp_skb_pcount(skb); |
1172 | } | 1165 | } |
@@ -1519,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1519 | * send_head. This happens as incoming acks open up the remote | 1512 | * send_head. This happens as incoming acks open up the remote |
1520 | * window for us. | 1513 | * window for us. |
1521 | * | 1514 | * |
1515 | * LARGESEND note: !tcp_urg_mode is overkill, only frames between | ||
1516 | * snd_up-64k-mss .. snd_up cannot be large. However, taking into | ||
1517 | * account rare use of URG, this is not a big flaw. | ||
1518 | * | ||
1522 | * Returns 1, if no segments are in flight and we have queued segments, but | 1519 | * Returns 1, if no segments are in flight and we have queued segments, but |
1523 | * cannot send anything now because of SWS or another problem. | 1520 | * cannot send anything now because of SWS or another problem. |
1524 | */ | 1521 | */ |
@@ -1570,7 +1567,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) | |||
1570 | } | 1567 | } |
1571 | 1568 | ||
1572 | limit = mss_now; | 1569 | limit = mss_now; |
1573 | if (tso_segs > 1) | 1570 | if (tso_segs > 1 && !tcp_urg_mode(tp)) |
1574 | limit = tcp_mss_split_point(sk, skb, mss_now, | 1571 | limit = tcp_mss_split_point(sk, skb, mss_now, |
1575 | cwnd_quota); | 1572 | cwnd_quota); |
1576 | 1573 | ||
@@ -1619,6 +1616,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, | |||
1619 | */ | 1616 | */ |
1620 | void tcp_push_one(struct sock *sk, unsigned int mss_now) | 1617 | void tcp_push_one(struct sock *sk, unsigned int mss_now) |
1621 | { | 1618 | { |
1619 | struct tcp_sock *tp = tcp_sk(sk); | ||
1622 | struct sk_buff *skb = tcp_send_head(sk); | 1620 | struct sk_buff *skb = tcp_send_head(sk); |
1623 | unsigned int tso_segs, cwnd_quota; | 1621 | unsigned int tso_segs, cwnd_quota; |
1624 | 1622 | ||
@@ -1633,7 +1631,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) | |||
1633 | BUG_ON(!tso_segs); | 1631 | BUG_ON(!tso_segs); |
1634 | 1632 | ||
1635 | limit = mss_now; | 1633 | limit = mss_now; |
1636 | if (tso_segs > 1) | 1634 | if (tso_segs > 1 && !tcp_urg_mode(tp)) |
1637 | limit = tcp_mss_split_point(sk, skb, mss_now, | 1635 | limit = tcp_mss_split_point(sk, skb, mss_now, |
1638 | cwnd_quota); | 1636 | cwnd_quota); |
1639 | 1637 | ||
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 14504dada116..a453aac91bd3 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -40,18 +40,14 @@ | |||
40 | 40 | ||
41 | #include "tcp_vegas.h" | 41 | #include "tcp_vegas.h" |
42 | 42 | ||
43 | /* Default values of the Vegas variables, in fixed-point representation | 43 | static int alpha = 2; |
44 | * with V_PARAM_SHIFT bits to the right of the binary point. | 44 | static int beta = 4; |
45 | */ | 45 | static int gamma = 1; |
46 | #define V_PARAM_SHIFT 1 | ||
47 | static int alpha = 2<<V_PARAM_SHIFT; | ||
48 | static int beta = 4<<V_PARAM_SHIFT; | ||
49 | static int gamma = 1<<V_PARAM_SHIFT; | ||
50 | 46 | ||
51 | module_param(alpha, int, 0644); | 47 | module_param(alpha, int, 0644); |
52 | MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)"); | 48 | MODULE_PARM_DESC(alpha, "lower bound of packets in network"); |
53 | module_param(beta, int, 0644); | 49 | module_param(beta, int, 0644); |
54 | MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)"); | 50 | MODULE_PARM_DESC(beta, "upper bound of packets in network"); |
55 | module_param(gamma, int, 0644); | 51 | module_param(gamma, int, 0644); |
56 | MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); | 52 | MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); |
57 | 53 | ||
@@ -172,49 +168,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
172 | return; | 168 | return; |
173 | } | 169 | } |
174 | 170 | ||
175 | /* The key players are v_beg_snd_una and v_beg_snd_nxt. | ||
176 | * | ||
177 | * These are so named because they represent the approximate values | ||
178 | * of snd_una and snd_nxt at the beginning of the current RTT. More | ||
179 | * precisely, they represent the amount of data sent during the RTT. | ||
180 | * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt, | ||
181 | * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding | ||
182 | * bytes of data have been ACKed during the course of the RTT, giving | ||
183 | * an "actual" rate of: | ||
184 | * | ||
185 | * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration) | ||
186 | * | ||
187 | * Unfortunately, v_beg_snd_una is not exactly equal to snd_una, | ||
188 | * because delayed ACKs can cover more than one segment, so they | ||
189 | * don't line up nicely with the boundaries of RTTs. | ||
190 | * | ||
191 | * Another unfortunate fact of life is that delayed ACKs delay the | ||
192 | * advance of the left edge of our send window, so that the number | ||
193 | * of bytes we send in an RTT is often less than our cwnd will allow. | ||
194 | * So we keep track of our cwnd separately, in v_beg_snd_cwnd. | ||
195 | */ | ||
196 | |||
197 | if (after(ack, vegas->beg_snd_nxt)) { | 171 | if (after(ack, vegas->beg_snd_nxt)) { |
198 | /* Do the Vegas once-per-RTT cwnd adjustment. */ | 172 | /* Do the Vegas once-per-RTT cwnd adjustment. */ |
199 | u32 old_wnd, old_snd_cwnd; | ||
200 | |||
201 | |||
202 | /* Here old_wnd is essentially the window of data that was | ||
203 | * sent during the previous RTT, and has all | ||
204 | * been acknowledged in the course of the RTT that ended | ||
205 | * with the ACK we just received. Likewise, old_snd_cwnd | ||
206 | * is the cwnd during the previous RTT. | ||
207 | */ | ||
208 | old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) / | ||
209 | tp->mss_cache; | ||
210 | old_snd_cwnd = vegas->beg_snd_cwnd; | ||
211 | 173 | ||
212 | /* Save the extent of the current window so we can use this | 174 | /* Save the extent of the current window so we can use this |
213 | * at the end of the next RTT. | 175 | * at the end of the next RTT. |
214 | */ | 176 | */ |
215 | vegas->beg_snd_una = vegas->beg_snd_nxt; | ||
216 | vegas->beg_snd_nxt = tp->snd_nxt; | 177 | vegas->beg_snd_nxt = tp->snd_nxt; |
217 | vegas->beg_snd_cwnd = tp->snd_cwnd; | ||
218 | 178 | ||
219 | /* We do the Vegas calculations only if we got enough RTT | 179 | /* We do the Vegas calculations only if we got enough RTT |
220 | * samples that we can be reasonably sure that we got | 180 | * samples that we can be reasonably sure that we got |
@@ -252,22 +212,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
252 | * | 212 | * |
253 | * This is: | 213 | * This is: |
254 | * (actual rate in segments) * baseRTT | 214 | * (actual rate in segments) * baseRTT |
255 | * We keep it as a fixed point number with | ||
256 | * V_PARAM_SHIFT bits to the right of the binary point. | ||
257 | */ | 215 | */ |
258 | target_cwnd = ((u64)old_wnd * vegas->baseRTT); | 216 | target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt; |
259 | target_cwnd <<= V_PARAM_SHIFT; | ||
260 | do_div(target_cwnd, rtt); | ||
261 | 217 | ||
262 | /* Calculate the difference between the window we had, | 218 | /* Calculate the difference between the window we had, |
263 | * and the window we would like to have. This quantity | 219 | * and the window we would like to have. This quantity |
264 | * is the "Diff" from the Arizona Vegas papers. | 220 | * is the "Diff" from the Arizona Vegas papers. |
265 | * | ||
266 | * Again, this is a fixed point number with | ||
267 | * V_PARAM_SHIFT bits to the right of the binary | ||
268 | * point. | ||
269 | */ | 221 | */ |
270 | diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; | 222 | diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; |
271 | 223 | ||
272 | if (diff > gamma && tp->snd_ssthresh > 2 ) { | 224 | if (diff > gamma && tp->snd_ssthresh > 2 ) { |
273 | /* Going too fast. Time to slow down | 225 | /* Going too fast. Time to slow down |
@@ -282,16 +234,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
282 | * truncation robs us of full link | 234 | * truncation robs us of full link |
283 | * utilization. | 235 | * utilization. |
284 | */ | 236 | */ |
285 | tp->snd_cwnd = min(tp->snd_cwnd, | 237 | tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); |
286 | ((u32)target_cwnd >> | ||
287 | V_PARAM_SHIFT)+1); | ||
288 | 238 | ||
289 | } else if (tp->snd_cwnd <= tp->snd_ssthresh) { | 239 | } else if (tp->snd_cwnd <= tp->snd_ssthresh) { |
290 | /* Slow start. */ | 240 | /* Slow start. */ |
291 | tcp_slow_start(tp); | 241 | tcp_slow_start(tp); |
292 | } else { | 242 | } else { |
293 | /* Congestion avoidance. */ | 243 | /* Congestion avoidance. */ |
294 | u32 next_snd_cwnd; | ||
295 | 244 | ||
296 | /* Figure out where we would like cwnd | 245 | /* Figure out where we would like cwnd |
297 | * to be. | 246 | * to be. |
@@ -300,32 +249,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
300 | /* The old window was too fast, so | 249 | /* The old window was too fast, so |
301 | * we slow down. | 250 | * we slow down. |
302 | */ | 251 | */ |
303 | next_snd_cwnd = old_snd_cwnd - 1; | 252 | tp->snd_cwnd--; |
304 | } else if (diff < alpha) { | 253 | } else if (diff < alpha) { |
305 | /* We don't have enough extra packets | 254 | /* We don't have enough extra packets |
306 | * in the network, so speed up. | 255 | * in the network, so speed up. |
307 | */ | 256 | */ |
308 | next_snd_cwnd = old_snd_cwnd + 1; | 257 | tp->snd_cwnd++; |
309 | } else { | 258 | } else { |
310 | /* Sending just as fast as we | 259 | /* Sending just as fast as we |
311 | * should be. | 260 | * should be. |
312 | */ | 261 | */ |
313 | next_snd_cwnd = old_snd_cwnd; | ||
314 | } | 262 | } |
315 | |||
316 | /* Adjust cwnd upward or downward, toward the | ||
317 | * desired value. | ||
318 | */ | ||
319 | if (next_snd_cwnd > tp->snd_cwnd) | ||
320 | tp->snd_cwnd++; | ||
321 | else if (next_snd_cwnd < tp->snd_cwnd) | ||
322 | tp->snd_cwnd--; | ||
323 | } | 263 | } |
324 | 264 | ||
325 | if (tp->snd_cwnd < 2) | 265 | if (tp->snd_cwnd < 2) |
326 | tp->snd_cwnd = 2; | 266 | tp->snd_cwnd = 2; |
327 | else if (tp->snd_cwnd > tp->snd_cwnd_clamp) | 267 | else if (tp->snd_cwnd > tp->snd_cwnd_clamp) |
328 | tp->snd_cwnd = tp->snd_cwnd_clamp; | 268 | tp->snd_cwnd = tp->snd_cwnd_clamp; |
269 | |||
270 | tp->snd_ssthresh = tcp_current_ssthresh(sk); | ||
329 | } | 271 | } |
330 | 272 | ||
331 | /* Wipe the slate clean for the next RTT. */ | 273 | /* Wipe the slate clean for the next RTT. */ |
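(Illustration, not part of the patch.) With V_PARAM_SHIFT gone, the once-per-RTT adjustment works on plain integers: target_cwnd = snd_cwnd * baseRTT / rtt and diff = snd_cwnd * (rtt - baseRTT) / baseRTT, compared directly against alpha/beta/gamma. A small numeric sketch with made-up cwnd and RTT values, not taken from the patch:

/*
 * Illustrative once-per-RTT Vegas calculation using the integer formulas
 * introduced above.  The cwnd and RTT values are invented for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 20;	/* current congestion window, in segments */
	unsigned int baseRTT  = 100;	/* smallest RTT observed so far           */
	unsigned int rtt      = 125;	/* RTT measured over the last window      */
	unsigned int alpha = 2, beta = 4;

	/* cwnd that would match the base RTT at the current sending rate */
	unsigned int target_cwnd = snd_cwnd * baseRTT / rtt;		/* 16 */

	/* "Diff" from the Vegas papers: extra segments queued in the network */
	unsigned int diff = snd_cwnd * (rtt - baseRTT) / baseRTT;	/* 5 */

	printf("target_cwnd=%u diff=%u\n", target_cwnd, diff);

	if (diff > beta)
		printf("diff > beta: congestion avoidance decrements snd_cwnd\n");
	else if (diff < alpha)
		printf("diff < alpha: congestion avoidance increments snd_cwnd\n");
	else
		printf("alpha <= diff <= beta: snd_cwnd left unchanged\n");
	return 0;
}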
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 172438320eec..d0f54d18e19b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -912,8 +912,13 @@ static void ndisc_recv_na(struct sk_buff *skb) | |||
912 | is invalid, but ndisc specs say nothing | 912 | is invalid, but ndisc specs say nothing |
913 | about it. It could be misconfiguration, or | 913 | about it. It could be misconfiguration, or |
914 | a smart proxy agent tries to help us :-) | 914 | a smart proxy agent tries to help us :-) |
915 | |||
916 | We should not print the error if NA has been | ||
917 | received from loopback - it is just our own | ||
918 | unsolicited advertisement. | ||
915 | */ | 919 | */ |
916 | ND_PRINTK1(KERN_WARNING | 920 | if (skb->pkt_type != PACKET_LOOPBACK) |
921 | ND_PRINTK1(KERN_WARNING | ||
917 | "ICMPv6 NA: someone advertises our address on %s!\n", | 922 | "ICMPv6 NA: someone advertises our address on %s!\n", |
918 | ifp->idev->dev->name); | 923 | ifp->idev->dev->name); |
919 | in6_ifa_put(ifp); | 924 | in6_ifa_put(ifp); |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7fef8ea1f5ec..d254446b85b5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -99,7 +99,7 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr) | |||
99 | 99 | ||
100 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); | 100 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); |
101 | while (sta) { | 101 | while (sta) { |
102 | if (compare_ether_addr(sta->sta.addr, addr) == 0) | 102 | if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) |
103 | break; | 103 | break; |
104 | sta = rcu_dereference(sta->hnext); | 104 | sta = rcu_dereference(sta->hnext); |
105 | } | 105 | } |
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 02a8fed21082..1acc089be7e9 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -141,7 +141,7 @@ socket_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
141 | sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, | 141 | sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, |
142 | saddr, daddr, sport, dport, par->in, false); | 142 | saddr, daddr, sport, dport, par->in, false); |
143 | if (sk != NULL) { | 143 | if (sk != NULL) { |
144 | bool wildcard = (inet_sk(sk)->rcv_saddr == 0); | 144 | bool wildcard = (sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->rcv_saddr == 0); |
145 | 145 | ||
146 | nf_tproxy_put_sock(sk); | 146 | nf_tproxy_put_sock(sk); |
147 | if (wildcard) | 147 | if (wildcard) |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e8a5c32b0f10..8c0308032178 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -562,7 +562,6 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, | |||
562 | const struct in_addr *mask, | 562 | const struct in_addr *mask, |
563 | struct netlbl_audit *audit_info) | 563 | struct netlbl_audit *audit_info) |
564 | { | 564 | { |
565 | int ret_val = 0; | ||
566 | struct netlbl_af4list *list_entry; | 565 | struct netlbl_af4list *list_entry; |
567 | struct netlbl_unlhsh_addr4 *entry; | 566 | struct netlbl_unlhsh_addr4 *entry; |
568 | struct audit_buffer *audit_buf; | 567 | struct audit_buffer *audit_buf; |
@@ -574,9 +573,10 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, | |||
574 | list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr, | 573 | list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr, |
575 | &iface->addr4_list); | 574 | &iface->addr4_list); |
576 | spin_unlock(&netlbl_unlhsh_lock); | 575 | spin_unlock(&netlbl_unlhsh_lock); |
577 | if (list_entry == NULL) | 576 | if (list_entry != NULL) |
578 | ret_val = -ENOENT; | 577 | entry = netlbl_unlhsh_addr4_entry(list_entry); |
579 | entry = netlbl_unlhsh_addr4_entry(list_entry); | 578 | else |
579 | entry = NULL; | ||
580 | 580 | ||
581 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, | 581 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, |
582 | audit_info); | 582 | audit_info); |
@@ -587,19 +587,21 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, | |||
587 | addr->s_addr, mask->s_addr); | 587 | addr->s_addr, mask->s_addr); |
588 | if (dev != NULL) | 588 | if (dev != NULL) |
589 | dev_put(dev); | 589 | dev_put(dev); |
590 | if (entry && security_secid_to_secctx(entry->secid, | 590 | if (entry != NULL && |
591 | &secctx, | 591 | security_secid_to_secctx(entry->secid, |
592 | &secctx_len) == 0) { | 592 | &secctx, &secctx_len) == 0) { |
593 | audit_log_format(audit_buf, " sec_obj=%s", secctx); | 593 | audit_log_format(audit_buf, " sec_obj=%s", secctx); |
594 | security_release_secctx(secctx, secctx_len); | 594 | security_release_secctx(secctx, secctx_len); |
595 | } | 595 | } |
596 | audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); | 596 | audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0); |
597 | audit_log_end(audit_buf); | 597 | audit_log_end(audit_buf); |
598 | } | 598 | } |
599 | 599 | ||
600 | if (ret_val == 0) | 600 | if (entry == NULL) |
601 | call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4); | 601 | return -ENOENT; |
602 | return ret_val; | 602 | |
603 | call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4); | ||
604 | return 0; | ||
603 | } | 605 | } |
604 | 606 | ||
605 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 607 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
@@ -623,7 +625,6 @@ static int netlbl_unlhsh_remove_addr6(struct net *net, | |||
623 | const struct in6_addr *mask, | 625 | const struct in6_addr *mask, |
624 | struct netlbl_audit *audit_info) | 626 | struct netlbl_audit *audit_info) |
625 | { | 627 | { |
626 | int ret_val = 0; | ||
627 | struct netlbl_af6list *list_entry; | 628 | struct netlbl_af6list *list_entry; |
628 | struct netlbl_unlhsh_addr6 *entry; | 629 | struct netlbl_unlhsh_addr6 *entry; |
629 | struct audit_buffer *audit_buf; | 630 | struct audit_buffer *audit_buf; |
@@ -634,9 +635,10 @@ static int netlbl_unlhsh_remove_addr6(struct net *net, | |||
634 | spin_lock(&netlbl_unlhsh_lock); | 635 | spin_lock(&netlbl_unlhsh_lock); |
635 | list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list); | 636 | list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list); |
636 | spin_unlock(&netlbl_unlhsh_lock); | 637 | spin_unlock(&netlbl_unlhsh_lock); |
637 | if (list_entry == NULL) | 638 | if (list_entry != NULL) |
638 | ret_val = -ENOENT; | 639 | entry = netlbl_unlhsh_addr6_entry(list_entry); |
639 | entry = netlbl_unlhsh_addr6_entry(list_entry); | 640 | else |
641 | entry = NULL; | ||
640 | 642 | ||
641 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, | 643 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, |
642 | audit_info); | 644 | audit_info); |
@@ -647,19 +649,21 @@ static int netlbl_unlhsh_remove_addr6(struct net *net, | |||
647 | addr, mask); | 649 | addr, mask); |
648 | if (dev != NULL) | 650 | if (dev != NULL) |
649 | dev_put(dev); | 651 | dev_put(dev); |
650 | if (entry && security_secid_to_secctx(entry->secid, | 652 | if (entry != NULL && |
651 | &secctx, | 653 | security_secid_to_secctx(entry->secid, |
652 | &secctx_len) == 0) { | 654 | &secctx, &secctx_len) == 0) { |
653 | audit_log_format(audit_buf, " sec_obj=%s", secctx); | 655 | audit_log_format(audit_buf, " sec_obj=%s", secctx); |
654 | security_release_secctx(secctx, secctx_len); | 656 | security_release_secctx(secctx, secctx_len); |
655 | } | 657 | } |
656 | audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); | 658 | audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0); |
657 | audit_log_end(audit_buf); | 659 | audit_log_end(audit_buf); |
658 | } | 660 | } |
659 | 661 | ||
660 | if (ret_val == 0) | 662 | if (entry == NULL) |
661 | call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6); | 663 | return -ENOENT; |
662 | return ret_val; | 664 | |
665 | call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6); | ||
666 | return 0; | ||
663 | } | 667 | } |
664 | #endif /* IPv6 */ | 668 | #endif /* IPv6 */ |
665 | 669 | ||
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 9978afbd9f2a..803eeef0aa85 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -155,12 +155,13 @@ static void gprs_data_ready(struct sock *sk, int len) | |||
155 | static void gprs_write_space(struct sock *sk) | 155 | static void gprs_write_space(struct sock *sk) |
156 | { | 156 | { |
157 | struct gprs_dev *dev = sk->sk_user_data; | 157 | struct gprs_dev *dev = sk->sk_user_data; |
158 | struct net_device *net = dev->net; | ||
158 | unsigned credits = pep_writeable(sk); | 159 | unsigned credits = pep_writeable(sk); |
159 | 160 | ||
160 | spin_lock_bh(&dev->tx_lock); | 161 | spin_lock_bh(&dev->tx_lock); |
161 | dev->tx_max = credits; | 162 | dev->tx_max = credits; |
162 | if (credits > skb_queue_len(&dev->tx_queue)) | 163 | if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net)) |
163 | netif_wake_queue(dev->net); | 164 | netif_wake_queue(net); |
164 | spin_unlock_bh(&dev->tx_lock); | 165 | spin_unlock_bh(&dev->tx_lock); |
165 | } | 166 | } |
166 | 167 | ||
@@ -168,6 +169,23 @@ static void gprs_write_space(struct sock *sk) | |||
168 | * Network device callbacks | 169 | * Network device callbacks |
169 | */ | 170 | */ |
170 | 171 | ||
172 | static int gprs_open(struct net_device *dev) | ||
173 | { | ||
174 | struct gprs_dev *gp = netdev_priv(dev); | ||
175 | |||
176 | gprs_write_space(gp->sk); | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | static int gprs_close(struct net_device *dev) | ||
181 | { | ||
182 | struct gprs_dev *gp = netdev_priv(dev); | ||
183 | |||
184 | netif_stop_queue(dev); | ||
185 | flush_work(&gp->tx_work); | ||
186 | return 0; | ||
187 | } | ||
188 | |||
171 | static int gprs_xmit(struct sk_buff *skb, struct net_device *net) | 189 | static int gprs_xmit(struct sk_buff *skb, struct net_device *net) |
172 | { | 190 | { |
173 | struct gprs_dev *dev = netdev_priv(net); | 191 | struct gprs_dev *dev = netdev_priv(net); |
@@ -254,6 +272,8 @@ static void gprs_setup(struct net_device *net) | |||
254 | net->tx_queue_len = 10; | 272 | net->tx_queue_len = 10; |
255 | 273 | ||
256 | net->destructor = free_netdev; | 274 | net->destructor = free_netdev; |
275 | net->open = gprs_open; | ||
276 | net->stop = gprs_close; | ||
257 | net->hard_start_xmit = gprs_xmit; /* mandatory */ | 277 | net->hard_start_xmit = gprs_xmit; /* mandatory */ |
258 | net->change_mtu = gprs_set_mtu; | 278 | net->change_mtu = gprs_set_mtu; |
259 | net->get_stats = gprs_get_stats; | 279 | net->get_stats = gprs_get_stats; |
@@ -318,7 +338,6 @@ int gprs_attach(struct sock *sk) | |||
318 | dev->sk = sk; | 338 | dev->sk = sk; |
319 | 339 | ||
320 | printk(KERN_DEBUG"%s: attached\n", net->name); | 340 | printk(KERN_DEBUG"%s: attached\n", net->name); |
321 | gprs_write_space(sk); /* kick off TX */ | ||
322 | return net->ifindex; | 341 | return net->ifindex; |
323 | 342 | ||
324 | out_rel: | 343 | out_rel: |
@@ -341,7 +360,5 @@ void gprs_detach(struct sock *sk) | |||
341 | 360 | ||
342 | printk(KERN_DEBUG"%s: detached\n", net->name); | 361 | printk(KERN_DEBUG"%s: detached\n", net->name); |
343 | unregister_netdev(net); | 362 | unregister_netdev(net); |
344 | flush_scheduled_work(); | ||
345 | sock_put(sk); | 363 | sock_put(sk); |
346 | skb_queue_purge(&dev->tx_queue); | ||
347 | } | 364 | } |
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index b1770d66bc8d..242fe8f8c322 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -123,6 +123,7 @@ nla_put_failure: | |||
123 | 123 | ||
124 | static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | 124 | static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) |
125 | { | 125 | { |
126 | struct net *net = sock_net(skb->sk); | ||
126 | struct phonet_device *pnd; | 127 | struct phonet_device *pnd; |
127 | int dev_idx = 0, dev_start_idx = cb->args[0]; | 128 | int dev_idx = 0, dev_start_idx = cb->args[0]; |
128 | int addr_idx = 0, addr_start_idx = cb->args[1]; | 129 | int addr_idx = 0, addr_start_idx = cb->args[1]; |
@@ -131,6 +132,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | |||
131 | list_for_each_entry(pnd, &pndevs.list, list) { | 132 | list_for_each_entry(pnd, &pndevs.list, list) { |
132 | u8 addr; | 133 | u8 addr; |
133 | 134 | ||
135 | if (!net_eq(dev_net(pnd->netdev), net)) | ||
136 | continue; | ||
134 | if (dev_idx > dev_start_idx) | 137 | if (dev_idx > dev_start_idx) |
135 | addr_start_idx = 0; | 138 | addr_start_idx = 0; |
136 | if (dev_idx++ < dev_start_idx) | 139 | if (dev_idx++ < dev_start_idx) |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a11959908d9a..98402f0efa47 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -46,9 +46,6 @@ | |||
46 | layering other disciplines. It does not need to do bandwidth | 46 | layering other disciplines. It does not need to do bandwidth |
47 | control either since that can be handled by using token | 47 | control either since that can be handled by using token |
48 | bucket or other rate control. | 48 | bucket or other rate control. |
49 | |||
50 | The simulator is limited by the Linux timer resolution | ||
51 | and will create packet bursts on the HZ boundary (1ms). | ||
52 | */ | 49 | */ |
53 | 50 | ||
54 | struct netem_sched_data { | 51 | struct netem_sched_data { |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 058f04f54b90..fb216c9adf86 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -817,6 +817,7 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info) | |||
817 | continue; | 817 | continue; |
818 | hlist_del(&pol->bydst); | 818 | hlist_del(&pol->bydst); |
819 | hlist_del(&pol->byidx); | 819 | hlist_del(&pol->byidx); |
820 | list_del(&pol->walk.all); | ||
820 | write_unlock_bh(&xfrm_policy_lock); | 821 | write_unlock_bh(&xfrm_policy_lock); |
821 | 822 | ||
822 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, | 823 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, |