about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-10-08 16:22:22 -0400
committerDavid S. Miller <davem@davemloft.net>2014-10-08 16:22:22 -0400
commit64b1f00a0830e1c53874067273a096b228d83d36 (patch)
treedd547b0f1d431d0995b8eaa711cedb92399f31fe /net
parent16b99a4f6644d58c94acb4b4253e84049de588c5 (diff)
parent5301e3e117d88ef0967ce278912e54757f1a31a2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net')
-rw-r--r--net/bridge/br_netfilter.c11
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/ipv6/ip6_gre.c4
-rw-r--r--net/rds/send.c11
-rw-r--r--net/rds/tcp_connect.c5
-rw-r--r--net/rds/threads.c3
-rw-r--r--net/sched/cls_api.c1
-rw-r--r--net/sched/ematch.c5
-rw-r--r--net/sctp/sm_statefuns.c19
9 files changed, 49 insertions(+), 14 deletions(-)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fa1270cc5086..1bada53bb195 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -316,6 +316,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
316 ETH_HLEN-ETH_ALEN); 316 ETH_HLEN-ETH_ALEN);
317 /* tell br_dev_xmit to continue with forwarding */ 317 /* tell br_dev_xmit to continue with forwarding */
318 nf_bridge->mask |= BRNF_BRIDGED_DNAT; 318 nf_bridge->mask |= BRNF_BRIDGED_DNAT;
319 /* FIXME Need to refragment */
319 ret = neigh->output(neigh, skb); 320 ret = neigh->output(neigh, skb);
320 } 321 }
321 neigh_release(neigh); 322 neigh_release(neigh);
@@ -371,6 +372,10 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
371 struct nf_bridge_info *nf_bridge = skb->nf_bridge; 372 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
372 struct rtable *rt; 373 struct rtable *rt;
373 int err; 374 int err;
375 int frag_max_size;
376
377 frag_max_size = IPCB(skb)->frag_max_size;
378 BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
374 379
375 if (nf_bridge->mask & BRNF_PKT_TYPE) { 380 if (nf_bridge->mask & BRNF_PKT_TYPE) {
376 skb->pkt_type = PACKET_OTHERHOST; 381 skb->pkt_type = PACKET_OTHERHOST;
@@ -775,13 +780,19 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
775static int br_nf_dev_queue_xmit(struct sk_buff *skb) 780static int br_nf_dev_queue_xmit(struct sk_buff *skb)
776{ 781{
777 int ret; 782 int ret;
783 int frag_max_size;
778 784
785 /* This is wrong! We should preserve the original fragment
786 * boundaries by preserving frag_list rather than refragmenting.
787 */
779 if (skb->protocol == htons(ETH_P_IP) && 788 if (skb->protocol == htons(ETH_P_IP) &&
780 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && 789 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
781 !skb_is_gso(skb)) { 790 !skb_is_gso(skb)) {
791 frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
782 if (br_parse_ip_options(skb)) 792 if (br_parse_ip_options(skb))
783 /* Drop invalid packet */ 793 /* Drop invalid packet */
784 return NF_DROP; 794 return NF_DROP;
795 IPCB(skb)->frag_max_size = frag_max_size;
785 ret = ip_fragment(skb, br_dev_queue_push_xmit); 796 ret = ip_fragment(skb, br_dev_queue_push_xmit);
786 } else 797 } else
787 ret = br_dev_queue_push_xmit(skb); 798 ret = br_dev_queue_push_xmit(skb);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d8cbaa694227..4d783d071305 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -306,10 +306,14 @@ struct net_bridge
306 306
307struct br_input_skb_cb { 307struct br_input_skb_cb {
308 struct net_device *brdev; 308 struct net_device *brdev;
309
309#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 310#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
310 int igmp; 311 int igmp;
311 int mrouters_only; 312 int mrouters_only;
312#endif 313#endif
314
315 u16 frag_max_size;
316
313#ifdef CONFIG_BRIDGE_VLAN_FILTERING 317#ifdef CONFIG_BRIDGE_VLAN_FILTERING
314 bool vlan_filtered; 318 bool vlan_filtered;
315#endif 319#endif
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index de3b1c86b8d3..12c3c8ef3849 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -786,7 +786,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
786 encap_limit = t->parms.encap_limit; 786 encap_limit = t->parms.encap_limit;
787 787
788 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 788 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
789 fl6.flowi6_proto = IPPROTO_IPIP; 789 fl6.flowi6_proto = IPPROTO_GRE;
790 790
791 dsfield = ipv4_get_dsfield(iph); 791 dsfield = ipv4_get_dsfield(iph);
792 792
@@ -836,7 +836,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
836 encap_limit = t->parms.encap_limit; 836 encap_limit = t->parms.encap_limit;
837 837
838 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 838 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
839 fl6.flowi6_proto = IPPROTO_IPV6; 839 fl6.flowi6_proto = IPPROTO_GRE;
840 840
841 dsfield = ipv6_get_dsfield(ipv6h); 841 dsfield = ipv6_get_dsfield(ipv6h);
842 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 842 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
diff --git a/net/rds/send.c b/net/rds/send.c
index 23718160d71e..0a64541020b0 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -593,8 +593,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
593 sock_put(rds_rs_to_sk(rs)); 593 sock_put(rds_rs_to_sk(rs));
594 } 594 }
595 rs = rm->m_rs; 595 rs = rm->m_rs;
596 sock_hold(rds_rs_to_sk(rs)); 596 if (rs)
597 sock_hold(rds_rs_to_sk(rs));
597 } 598 }
599 if (!rs)
600 goto unlock_and_drop;
598 spin_lock(&rs->rs_lock); 601 spin_lock(&rs->rs_lock);
599 602
600 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { 603 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
@@ -638,9 +641,6 @@ unlock_and_drop:
638 * queue. This means that in the TCP case, the message may not have been 641 * queue. This means that in the TCP case, the message may not have been
639 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked 642 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
640 * checks the RDS_MSG_HAS_ACK_SEQ bit. 643 * checks the RDS_MSG_HAS_ACK_SEQ bit.
641 *
642 * XXX It's not clear to me how this is safely serialized with socket
643 * destruction. Maybe it should bail if it sees SOCK_DEAD.
644 */ 644 */
645void rds_send_drop_acked(struct rds_connection *conn, u64 ack, 645void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
646 is_acked_func is_acked) 646 is_acked_func is_acked)
@@ -711,6 +711,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
711 */ 711 */
712 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { 712 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
713 spin_unlock_irqrestore(&conn->c_lock, flags); 713 spin_unlock_irqrestore(&conn->c_lock, flags);
714 spin_lock_irqsave(&rm->m_rs_lock, flags);
715 rm->m_rs = NULL;
716 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
714 continue; 717 continue;
715 } 718 }
716 list_del_init(&rm->m_conn_item); 719 list_del_init(&rm->m_conn_item);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index a65ee78db0c5..f9f564a6c960 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -106,11 +106,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
106 rds_tcp_set_callbacks(sock, conn); 106 rds_tcp_set_callbacks(sock, conn);
107 ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest), 107 ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
108 O_NONBLOCK); 108 O_NONBLOCK);
109 sock = NULL;
110 109
111 rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret); 110 rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
112 if (ret == -EINPROGRESS) 111 if (ret == -EINPROGRESS)
113 ret = 0; 112 ret = 0;
113 if (ret == 0)
114 sock = NULL;
115 else
116 rds_tcp_restore_callbacks(sock, conn->c_transport_data);
114 117
115out: 118out:
116 if (sock) 119 if (sock)
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 65eaefcab241..dc2402e871fd 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -78,8 +78,7 @@ void rds_connect_complete(struct rds_connection *conn)
78 "current state is %d\n", 78 "current state is %d\n",
79 __func__, 79 __func__,
80 atomic_read(&conn->c_state)); 80 atomic_read(&conn->c_state));
81 atomic_set(&conn->c_state, RDS_CONN_ERROR); 81 rds_conn_drop(conn);
82 queue_work(rds_wq, &conn->c_down_w);
83 return; 82 return;
84 } 83 }
85 84
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 77147c8c4acc..aad6a679fb13 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -549,6 +549,7 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
549 tcf_tree_lock(tp); 549 tcf_tree_lock(tp);
550 list_splice_init(&dst->actions, &tmp); 550 list_splice_init(&dst->actions, &tmp);
551 list_splice(&src->actions, &dst->actions); 551 list_splice(&src->actions, &dst->actions);
552 dst->type = src->type;
552 tcf_tree_unlock(tp); 553 tcf_tree_unlock(tp);
553 tcf_action_destroy(&tmp, TCA_ACT_UNBIND); 554 tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
554#endif 555#endif
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 8250c36543d8..6742200b1307 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -528,9 +528,10 @@ pop_stack:
528 match_idx = stack[--stackp]; 528 match_idx = stack[--stackp];
529 cur_match = tcf_em_get_match(tree, match_idx); 529 cur_match = tcf_em_get_match(tree, match_idx);
530 530
531 if (tcf_em_is_inverted(cur_match))
532 res = !res;
533
531 if (tcf_em_early_end(cur_match, res)) { 534 if (tcf_em_early_end(cur_match, res)) {
532 if (tcf_em_is_inverted(cur_match))
533 res = !res;
534 goto pop_stack; 535 goto pop_stack;
535 } else { 536 } else {
536 match_idx++; 537 match_idx++;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d3f1ea460c50..c8f606324134 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1775,9 +1775,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
1775 /* Update the content of current association. */ 1775 /* Update the content of current association. */
1776 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1776 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1777 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 1777 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
1778 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 1778 if (sctp_state(asoc, SHUTDOWN_PENDING) &&
1779 SCTP_STATE(SCTP_STATE_ESTABLISHED)); 1779 (sctp_sstate(asoc->base.sk, CLOSING) ||
1780 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 1780 sock_flag(asoc->base.sk, SOCK_DEAD))) {
1781 /* if we're currently in SHUTDOWN_PENDING, but the socket
1782 * has been closed by user, don't transition to ESTABLISHED.
1783 * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
1784 */
1785 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1786 return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
1787 SCTP_ST_CHUNK(0), NULL,
1788 commands);
1789 } else {
1790 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1791 SCTP_STATE(SCTP_STATE_ESTABLISHED));
1792 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
1793 }
1781 return SCTP_DISPOSITION_CONSUME; 1794 return SCTP_DISPOSITION_CONSUME;
1782 1795
1783nomem_ev: 1796nomem_ev: