author		David S. Miller <davem@davemloft.net>	2012-02-26 21:55:51 -0500
committer	David S. Miller <davem@davemloft.net>	2012-02-26 21:55:51 -0500
commit		ff4783ce78c08d2990126ce1874250ae8e72bbd2 (patch)
tree		5c95885a4ab768101dd72942b57c238d452a7565 /net/netfilter
parent		622121719934f60378279eb440d3cec2fc3176d2 (diff)
parent		203738e548cefc3fc3c2f73a9063176c9f3583d5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/sfc/rx.c

Overlapping changes in drivers/net/ethernet/sfc/rx.c, one to change
the rx_buf->is_page boolean into a set of u16 flags, and another to
adjust how ->ip_summed is initialized.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netfilter')
-rw-r--r--	net/netfilter/ipvs/ip_vs_core.c       |  2
-rw-r--r--	net/netfilter/nf_conntrack_core.c     | 38
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c  | 46
-rw-r--r--	net/netfilter/nf_queue.c              | 40
-rw-r--r--	net/netfilter/xt_TEE.c                |  5
5 files changed, 86 insertions, 45 deletions
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 611c3359b94d..2555816e7788 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	__be16 dport = 0;		/* destination port to forward */
 	unsigned int flags;
 	struct ip_vs_conn_param param;
+	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 	union nf_inet_addr snet;	/* source network of the client,
 					   after masking */
 
@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	{
 		int protocol = iph.protocol;
 		const union nf_inet_addr *vaddr = &iph.daddr;
-		const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 		__be16 vport = 0;
 
 		if (dst_port == svc->port) {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 76613f5a55c0..ed86a3be678e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -404,19 +404,49 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 			   &net->ct.hash[repl_hash]);
 }
 
-void nf_conntrack_hash_insert(struct nf_conn *ct)
+int
+nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, repl_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
 	u16 zone;
 
 	zone = nf_ct_zone(ct);
-	hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	hash = hash_conntrack(net, zone,
+			      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	repl_hash = hash_conntrack(net, zone,
+				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+	spin_lock_bh(&nf_conntrack_lock);
 
+	/* See if there's one in the list already, including reverse */
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
+		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				      &h->tuple) &&
+		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+			goto out;
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				      &h->tuple) &&
+		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+			goto out;
+
+	add_timer(&ct->timeout);
+	nf_conntrack_get(&ct->ct_general);
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
+	NF_CT_STAT_INC(net, insert);
+	spin_unlock_bh(&nf_conntrack_lock);
+
+	return 0;
+
+out:
+	NF_CT_STAT_INC(net, insert_failed);
+	spin_unlock_bh(&nf_conntrack_lock);
+	return -EEXIST;
 }
-EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
 /* Confirm a connection given skb; places it in hash table */
 int
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 28d0312d890a..04fb409623d2 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1404,15 +1404,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 						    nf_ct_protonum(ct));
 		if (helper == NULL) {
 			rcu_read_unlock();
-			spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
 			if (request_module("nfct-helper-%s", helpname) < 0) {
-				spin_lock_bh(&nf_conntrack_lock);
 				err = -EOPNOTSUPP;
 				goto err1;
 			}
 
-			spin_lock_bh(&nf_conntrack_lock);
 			rcu_read_lock();
 			helper = __nf_conntrack_helper_find(helpname,
 							    nf_ct_l3num(ct),
@@ -1505,8 +1502,10 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 	if (tstamp)
 		tstamp->start = ktime_to_ns(ktime_get_real());
 
-	add_timer(&ct->timeout);
-	nf_conntrack_hash_insert(ct);
+	err = nf_conntrack_hash_check_insert(ct);
+	if (err < 0)
+		goto err2;
+
 	rcu_read_unlock();
 
 	return ct;
@@ -1527,6 +1526,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 	struct nf_conntrack_tuple otuple, rtuple;
 	struct nf_conntrack_tuple_hash *h = NULL;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	struct nf_conn *ct;
 	u_int8_t u3 = nfmsg->nfgen_family;
 	u16 zone;
 	int err;
@@ -1547,27 +1547,22 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 			return err;
 	}
 
-	spin_lock_bh(&nf_conntrack_lock);
 	if (cda[CTA_TUPLE_ORIG])
-		h = __nf_conntrack_find(net, zone, &otuple);
+		h = nf_conntrack_find_get(net, zone, &otuple);
 	else if (cda[CTA_TUPLE_REPLY])
-		h = __nf_conntrack_find(net, zone, &rtuple);
+		h = nf_conntrack_find_get(net, zone, &rtuple);
 
 	if (h == NULL) {
 		err = -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
-			struct nf_conn *ct;
 			enum ip_conntrack_events events;
 
 			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
 							&rtuple, u3);
-			if (IS_ERR(ct)) {
-				err = PTR_ERR(ct);
-				goto out_unlock;
-			}
+			if (IS_ERR(ct))
+				return PTR_ERR(ct);
+
 			err = 0;
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
 				events = IPCT_RELATED;
 			else
@@ -1582,23 +1577,19 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
 			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
+		}
 
 		return err;
 	}
 	/* implicit 'else' */
 
-	/* We manipulate the conntrack inside the global conntrack table lock,
-	 * so there's no need to increase the refcount */
 	err = -EEXIST;
+	ct = nf_ct_tuplehash_to_ctrack(h);
 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
-		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
-
+		spin_lock_bh(&nf_conntrack_lock);
 		err = ctnetlink_change_conntrack(ct, cda);
+		spin_unlock_bh(&nf_conntrack_lock);
 		if (err == 0) {
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
 						      (1 << IPCT_ASSURED) |
 						      (1 << IPCT_HELPER) |
@@ -1607,15 +1598,10 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      (1 << IPCT_MARK),
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
-			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
-
-		return err;
+		}
 	}
 
-out_unlock:
-	spin_unlock_bh(&nf_conntrack_lock);
+	nf_ct_put(ct);
 	return err;
 }
 
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index b3a7db678b8d..ce60cf0f6c11 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -203,6 +203,27 @@ err:
 	return status;
 }
 
+#ifdef CONFIG_BRIDGE_NETFILTER
+/* When called from bridge netfilter, skb->data must point to MAC header
+ * before calling skb_gso_segment(). Else, original MAC header is lost
+ * and segmented skbs will be sent to wrong destination.
+ */
+static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_push(skb, skb->network_header - skb->mac_header);
+}
+
+static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_pull(skb, skb->network_header - skb->mac_header);
+}
+#else
+#define nf_bridge_adjust_skb_data(s) do {} while (0)
+#define nf_bridge_adjust_segmented_data(s) do {} while (0)
+#endif
+
 int nf_queue(struct sk_buff *skb,
 	     struct list_head *elem,
 	     u_int8_t pf, unsigned int hook,
@@ -212,7 +233,7 @@ int nf_queue(struct sk_buff *skb,
 	     unsigned int queuenum)
 {
 	struct sk_buff *segs;
-	int err;
+	int err = -EINVAL;
 	unsigned int queued;
 
 	if (!skb_is_gso(skb))
@@ -228,23 +249,25 @@ int nf_queue(struct sk_buff *skb,
 			break;
 	}
 
+	nf_bridge_adjust_skb_data(skb);
 	segs = skb_gso_segment(skb, 0);
 	/* Does not use PTR_ERR to limit the number of error codes that can be
 	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
 	 * 'ignore this hook'.
 	 */
 	if (IS_ERR(segs))
-		return -EINVAL;
-
+		goto out_err;
 	queued = 0;
 	err = 0;
 	do {
 		struct sk_buff *nskb = segs->next;
 
 		segs->next = NULL;
-		if (err == 0)
+		if (err == 0) {
+			nf_bridge_adjust_segmented_data(segs);
 			err = __nf_queue(segs, elem, pf, hook, indev,
 					 outdev, okfn, queuenum);
+		}
 		if (err == 0)
 			queued++;
 		else
@@ -252,11 +275,12 @@ int nf_queue(struct sk_buff *skb,
 		segs = nskb;
 	} while (segs);
 
-	/* also free orig skb if only some segments were queued */
-	if (unlikely(err && queued))
-		err = 0;
-	if (err == 0)
+	if (queued) {
 		kfree_skb(skb);
+		return 0;
+	}
+ out_err:
+	nf_bridge_adjust_segmented_data(skb);
 	return err;
 }
 
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 3aae66facf9f..4d5057902839 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -152,9 +152,10 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
 			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL)
+	if (dst->error) {
+		dst_release(dst);
 		return false;
-
+	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	skb->dev = dst->dev;