author     David S. Miller <davem@davemloft.net>  2008-12-06 01:54:40 -0500
committer  David S. Miller <davem@davemloft.net>  2008-12-06 01:54:40 -0500
commit     730c30ec646bd252a9448a66ecd51d794853513f (patch)
tree       c0d413860f9d8bf37374f17cfabb4911143465d7 /net
parent     726e07a8a38168266ac95d87736f9501a2d9e7b2 (diff)
parent     0a0755c9fe47dc9f8271935909c66096e43efbfe (diff)

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/wireless/iwlwifi/iwl-core.c
	drivers/net/wireless/iwlwifi/iwl-sta.c
Diffstat (limited to 'net')
-rw-r--r--  net/atm/svc.c                      |  6
-rw-r--r--  net/can/af_can.c                   | 68
-rw-r--r--  net/can/bcm.c                      |  7
-rw-r--r--  net/ipv4/tcp_output.c              | 22
-rw-r--r--  net/ipv4/tcp_vegas.c               |  2
-rw-r--r--  net/mac80211/sta_info.c            |  2
-rw-r--r--  net/netlabel/netlabel_unlabeled.c  | 10
-rw-r--r--  net/phonet/pn_netlink.c            |  3

8 files changed, 83 insertions, 37 deletions
diff --git a/net/atm/svc.c b/net/atm/svc.c
index e9c65500f84e..7b831b526d0b 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -293,7 +293,10 @@ static int svc_listen(struct socket *sock,int backlog)
 		error = -EINVAL;
 		goto out;
 	}
-	vcc_insert_socket(sk);
+	if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
+		error = -EADDRINUSE;
+		goto out;
+	}
 	set_bit(ATM_VF_WAITING, &vcc->flags);
 	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local);
@@ -307,6 +310,7 @@ static int svc_listen(struct socket *sock,int backlog)
 		goto out;
 	}
 	set_bit(ATM_VF_LISTEN,&vcc->flags);
+	vcc_insert_socket(sk);
 	sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
 	error = -sk->sk_err;
 out:
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7d4d2b3c137e..3dadb338addd 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -319,23 +319,52 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
 	return n ? d : NULL;
 }
 
+/**
+ * find_rcv_list - determine optimal filterlist inside device filter struct
+ * @can_id: pointer to CAN identifier of a given can_filter
+ * @mask: pointer to CAN mask of a given can_filter
+ * @d: pointer to the device filter struct
+ *
+ * Description:
+ *  Returns the optimal filterlist to reduce the filter handling in the
+ *  receive path. This function is called by service functions that need
+ *  to register or unregister a can_filter in the filter lists.
+ *
+ *  A filter matches in general, when
+ *
+ *          <received_can_id> & mask == can_id & mask
+ *
+ *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describe
+ *  relevant bits for the filter.
+ *
+ *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
+ *  filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
+ *  there is a special filterlist and a special rx path filter handling.
+ *
+ * Return:
+ *  Pointer to optimal filterlist for the given can_id/mask pair.
+ *  Constistency checked mask.
+ *  Reduced can_id to have a preprocessed filter compare value.
+ */
 static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
 					struct dev_rcv_lists *d)
 {
 	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
 
-	/* filter error frames */
+	/* filter for error frames in extra filterlist */
 	if (*mask & CAN_ERR_FLAG) {
-		/* clear CAN_ERR_FLAG in list entry */
+		/* clear CAN_ERR_FLAG in filter entry */
 		*mask &= CAN_ERR_MASK;
 		return &d->rx[RX_ERR];
 	}
 
-	/* ensure valid values in can_mask */
-	if (*mask & CAN_EFF_FLAG)
-		*mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG);
-	else
-		*mask &= (CAN_SFF_MASK | CAN_RTR_FLAG);
+	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
+
+#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
+
+	/* ensure valid values in can_mask for 'SFF only' frame filtering */
+	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
+		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
 
 	/* reduce condition testing at receive time */
 	*can_id &= *mask;
@@ -348,15 +377,19 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
 	if (!(*mask))
 		return &d->rx[RX_ALL];
 
-	/* use extra filterset for the subscription of exactly *ONE* can_id */
-	if (*can_id & CAN_EFF_FLAG) {
-		if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) {
-			/* RFC: a use-case for hash-tables in the future? */
-			return &d->rx[RX_EFF];
+	/* extra filterlists for the subscription of a single non-RTR can_id */
+	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
+	    && !(*can_id & CAN_RTR_FLAG)) {
+
+		if (*can_id & CAN_EFF_FLAG) {
+			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
+				/* RFC: a future use-case for hash-tables? */
+				return &d->rx[RX_EFF];
+			}
+		} else {
+			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
+				return &d->rx_sff[*can_id];
 		}
-	} else {
-		if (*mask == CAN_SFF_MASK)
-			return &d->rx_sff[*can_id];
 	}
 
 	/* default: filter via can_id/can_mask */
@@ -589,7 +622,10 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
 		}
 	}
 
-	/* check CAN_ID specific entries */
+	/* check filterlists for single non-RTR can_ids */
+	if (can_id & CAN_RTR_FLAG)
+		return matches;
+
 	if (can_id & CAN_EFF_FLAG) {
 		hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
 			if (r->can_id == can_id) {
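The kernel-doc comment added above states the matching rule as <received_can_id> & mask == can_id & mask, with CAN_INV_FILTER inverting the result. The following is an illustrative sketch only, not code from this commit: filter_matches() and rcv_id are made-up names, and the real receive path precomputes can_id &= mask as shown above instead of evaluating the full expression per frame.

#include <stdbool.h>
#include <linux/can.h>	/* canid_t, struct can_filter, CAN_INV_FILTER */

/* A filter matches when: <received_can_id> & mask == can_id & mask.
 * CAN_INV_FILTER set in the filter's can_id inverts the result. */
static bool filter_matches(canid_t rcv_id, const struct can_filter *f)
{
	canid_t id = f->can_id & ~CAN_INV_FILTER;	/* strip the invert flag */
	bool match = (rcv_id & f->can_mask) == (id & f->can_mask);

	return (f->can_id & CAN_INV_FILTER) ? !match : match;
}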
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d0dd382001e2..da0d426c0ce4 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -64,10 +64,11 @@
 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
 
 /* get best masking value for can_rx_register() for a given single can_id */
-#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \
-		     (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK))
+#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
+		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
+		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
 
-#define CAN_BCM_VERSION "20080415"
+#define CAN_BCM_VERSION CAN_VERSION
 static __initdata const char banner[] = KERN_INFO
 	"can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
 
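To see what the new REGMASK changes in practice, a worked example follows. This is an illustrative sketch, not part of the commit; the identifier 0x123 and the helper name are assumed for demonstration.

#include <linux/can.h>

/* Effect of the new REGMASK for an assumed plain SFF identifier 0x123. */
static canid_t bcm_regmask_example(void)
{
	canid_t id   = 0x123;
	canid_t mask = (id & CAN_EFF_FLAG) ?
			(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) :
			(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG);

	/* The old REGMASK produced plain CAN_SFF_MASK here, so RTR and EFF
	 * frames sharing the low 11 bits could also match.  With the EFF and
	 * RTR bits pinned to zero, only the non-RTR SFF frame 0x123 matches,
	 * which lets find_rcv_list() above file the subscription in the
	 * per-identifier d->rx_sff[] list. */
	return mask;
}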
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 80147ba44141..59505ce0c8fd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -722,8 +722,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
-	if (skb->len <= mss_now || !sk_can_gso(sk) ||
-	    tcp_urg_mode(tcp_sk(sk))) {
+	if (skb->len <= mss_now || !sk_can_gso(sk)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -1029,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 
 /* Compute the current effective MSS, taking SACKs and IP options,
  * and even PMTU discovery events into account.
- *
- * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
- * cannot be large. However, taking into account rare use of URG, this
- * is not a big flaw.
  */
 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 {
@@ -1047,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 	mss_now = tp->mss_cache;
 
-	if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp))
+	if (large_allowed && sk_can_gso(sk))
 		doing_tso = 1;
 
 	if (dst) {
@@ -1164,9 +1159,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
-	if (!tso_segs ||
-	    (tso_segs > 1 && (tcp_skb_mss(skb) != mss_now ||
-			      tcp_urg_mode(tcp_sk(sk))))) {
+	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
 		tcp_set_skb_tso_segs(sk, skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
@@ -1519,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk)
  * send_head. This happens as incoming acks open up the remote
  * window for us.
  *
+ * LARGESEND note: !tcp_urg_mode is overkill, only frames between
+ * snd_up-64k-mss .. snd_up cannot be large. However, taking into
+ * account rare use of URG, this is not a big flaw.
+ *
  * Returns 1, if no segments are in flight and we have queued segments, but
  * cannot send anything now because of SWS or another problem.
  */
@@ -1563,7 +1560,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 		}
 
 		limit = mss_now;
-		if (tso_segs > 1)
+		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
 						    cwnd_quota);
 
@@ -1620,6 +1617,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
  */
 void tcp_push_one(struct sock *sk, unsigned int mss_now)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 	unsigned int tso_segs, cwnd_quota;
 
@@ -1634,7 +1632,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 	BUG_ON(!tso_segs);
 
 	limit = mss_now;
-	if (tso_segs > 1)
+	if (tso_segs > 1 && !tcp_urg_mode(tp))
 		limit = tcp_mss_split_point(sk, skb, mss_now,
 					    cwnd_quota);
 
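Taken together, the tcp_output.c hunks move the urgent-mode special case from the TSO bookkeeping helpers to the point where the per-skb send limit is chosen, as the relocated LARGESEND note explains. A condensed before/after sketch (not literal kernel code; variable names as in the hunks above):

	/* before: TSO was effectively disabled for the whole connection
	 * whenever urgent mode was active */
	if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp))
		doing_tso = 1;

	/* after: TSO stays enabled; only the per-skb limit falls back to a
	 * single MSS while urgent data is outstanding, since only frames up
	 * to snd_up cannot be large */
	limit = mss_now;
	if (tso_segs > 1 && !tcp_urg_mode(tp))
		limit = tcp_mss_split_point(sk, skb, mss_now, cwnd_quota);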
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 14504dada116..7cd22262de3a 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -326,6 +326,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 				tp->snd_cwnd = 2;
 			else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
 				tp->snd_cwnd = tp->snd_cwnd_clamp;
+
+			tp->snd_ssthresh = tcp_current_ssthresh(sk);
 		}
 
 		/* Wipe the slate clean for the next RTT. */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index b22110a4a75e..10c5539c20ab 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -99,7 +99,7 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
 
 	sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
 	while (sta) {
-		if (compare_ether_addr(sta->sta.addr, addr) == 0)
+		if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference(sta->hnext);
 	}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e8a5c32b0f10..90c8506a0aac 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -574,9 +574,10 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
 	list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
 					   &iface->addr4_list);
 	spin_unlock(&netlbl_unlhsh_lock);
-	if (list_entry == NULL)
+	if (list_entry != NULL)
+		entry = netlbl_unlhsh_addr4_entry(list_entry);
+	else
 		ret_val = -ENOENT;
-	entry = netlbl_unlhsh_addr4_entry(list_entry);
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
 					      audit_info);
@@ -634,9 +635,10 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 	spin_lock(&netlbl_unlhsh_lock);
 	list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list);
 	spin_unlock(&netlbl_unlhsh_lock);
-	if (list_entry == NULL)
+	if (list_entry != NULL)
+		entry = netlbl_unlhsh_addr6_entry(list_entry);
+	else
 		ret_val = -ENOENT;
-	entry = netlbl_unlhsh_addr6_entry(list_entry);
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
 					      audit_info);
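The reordering above matters if, as is typical for such helpers, netlbl_unlhsh_addr4_entry()/netlbl_unlhsh_addr6_entry() are container_of()-style conversions: converting a NULL lookup result does not yield NULL. A minimal sketch of the pitfall, assuming such a helper (names and layout here are hypothetical, not from netlabel):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next; };

struct item {
	int payload;
	struct list_node node;
};

static struct item *item_entry(struct list_node *n)
{
	/* passing n == NULL returns a small bogus non-NULL pointer, which
	 * later code could dereference; hence the explicit NULL check in
	 * the fixed code above */
	return container_of(n, struct item, node);
}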
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index b1770d66bc8d..242fe8f8c322 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -123,6 +123,7 @@ nla_put_failure:
 
 static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct net *net = sock_net(skb->sk);
 	struct phonet_device *pnd;
 	int dev_idx = 0, dev_start_idx = cb->args[0];
 	int addr_idx = 0, addr_start_idx = cb->args[1];
@@ -131,6 +132,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 	list_for_each_entry(pnd, &pndevs.list, list) {
 		u8 addr;
 
+		if (!net_eq(dev_net(pnd->netdev), net))
+			continue;
 		if (dev_idx > dev_start_idx)
 			addr_start_idx = 0;
 		if (dev_idx++ < dev_start_idx)