author     David S. Miller <davem@davemloft.net>  2012-02-23 00:20:14 -0500
committer  David S. Miller <davem@davemloft.net>  2012-02-23 00:20:14 -0500
commit     4a2258ddddefeef3291c0fc66437c73d84261a1e (patch)
tree       292008df656adc6c8412a79da6bedaff54458eab
parent     ee932bf9acb2e2c6a309e808000f24856330e3f9 (diff)
parent     af14cca162ddcdea017b648c21b9b091e4bf1fa4 (diff)
Merge branch 'nf' of git://1984.lsi.us.es/net
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h |  4
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c           |  2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c      | 43
-rw-r--r--  net/netfilter/nf_queue.c                  | 40
4 files changed, 51 insertions, 38 deletions
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 8797ed16feb2..4dd5bd6994a8 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -285,8 +285,8 @@ struct ebt_table {
 	struct module *me;
 };
 
-#define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_replace)-1)) & \
-		      ~(__alignof__(struct ebt_replace)-1))
+#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
+		      ~(__alignof__(struct _xt_align)-1))
 extern struct ebt_table *ebt_register_table(struct net *net,
 					    const struct ebt_table *table);
 extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
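Note: the ebtables hunk switches the alignment reference from struct ebt_replace to the xtables _xt_align type, so ebtables entry sizes round to the same boundary the rest of xtables uses. As a rough standalone illustration of how a round-up-to-alignment macro of this shape behaves (the dummy type below is hypothetical, not the kernel header):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the alignment reference type. */
struct xt_align_dummy { uint64_t u64; };

/* Same shape as EBT_ALIGN(): round s up to the alignment of a type. */
#define ALIGN_TO(s, type) \
	(((s) + (__alignof__(type) - 1)) & ~(__alignof__(type) - 1))

int main(void)
{
	/* With an 8-byte alignment requirement, 13 rounds up to 16. */
	printf("13 -> %zu\n", (size_t)ALIGN_TO(13, struct xt_align_dummy));
	printf("16 -> %zu\n", (size_t)ALIGN_TO(16, struct xt_align_dummy));
	return 0;
}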
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 611c3359b94d..2555816e7788 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	__be16 dport = 0;		/* destination port to forward */
 	unsigned int flags;
 	struct ip_vs_conn_param param;
+	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 	union nf_inet_addr snet;	/* source network of the client,
 					   after masking */
 
@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 		{
 			int protocol = iph.protocol;
 			const union nf_inet_addr *vaddr = &iph.daddr;
-			const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 			__be16 vport = 0;
 
 			if (dst_port == svc->port) {
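Note: the IPVS hunk hoists the fwmark address union from an inner block up to function scope, which matters if the union's address is stored and then read after the inner block has ended; using a pointer to a block-scoped object past its lifetime is undefined behavior. A minimal standalone sketch of that hazard, with hypothetical names rather than the IPVS structures:

#include <stdio.h>

/* Hypothetical parameter block that only stores a pointer to an address. */
struct conn_param { const unsigned int *vaddr; };

static void fill_param(struct conn_param *p, const unsigned int *addr)
{
	p->vaddr = addr;	/* keeps the pointer, not a copy */
}

int main(void)
{
	struct conn_param param;

	{
		/* Block-scoped object: its lifetime ends with the block. */
		const unsigned int fwmark = 42;
		fill_param(&param, &fwmark);
	}

	/* Undefined behavior: param.vaddr points at an expired object.
	 * Declaring fwmark at function scope, as the patch does, keeps
	 * the pointer valid for the rest of the function. */
	printf("%u\n", *param.vaddr);
	return 0;
}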
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9307b033c0c9..cc705175765c 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1367,15 +1367,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 						    nf_ct_protonum(ct));
 		if (helper == NULL) {
 			rcu_read_unlock();
-			spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
 			if (request_module("nfct-helper-%s", helpname) < 0) {
-				spin_lock_bh(&nf_conntrack_lock);
 				err = -EOPNOTSUPP;
 				goto err1;
 			}
 
-			spin_lock_bh(&nf_conntrack_lock);
 			rcu_read_lock();
 			helper = __nf_conntrack_helper_find(helpname,
 							    nf_ct_l3num(ct),
@@ -1469,7 +1466,10 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 		tstamp->start = ktime_to_ns(ktime_get_real());
 
 	add_timer(&ct->timeout);
+	spin_lock_bh(&nf_conntrack_lock);
 	nf_conntrack_hash_insert(ct);
+	nf_conntrack_get(&ct->ct_general);
+	spin_unlock_bh(&nf_conntrack_lock);
 	rcu_read_unlock();
 
 	return ct;
@@ -1490,6 +1490,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 	struct nf_conntrack_tuple otuple, rtuple;
 	struct nf_conntrack_tuple_hash *h = NULL;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	struct nf_conn *ct;
 	u_int8_t u3 = nfmsg->nfgen_family;
 	u16 zone;
 	int err;
@@ -1512,25 +1513,22 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
 	spin_lock_bh(&nf_conntrack_lock);
 	if (cda[CTA_TUPLE_ORIG])
-		h = __nf_conntrack_find(net, zone, &otuple);
+		h = nf_conntrack_find_get(net, zone, &otuple);
 	else if (cda[CTA_TUPLE_REPLY])
-		h = __nf_conntrack_find(net, zone, &rtuple);
+		h = nf_conntrack_find_get(net, zone, &rtuple);
+	spin_unlock_bh(&nf_conntrack_lock);
 
 	if (h == NULL) {
 		err = -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
-			struct nf_conn *ct;
 			enum ip_conntrack_events events;
 
 			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
 							&rtuple, u3);
-			if (IS_ERR(ct)) {
-				err = PTR_ERR(ct);
-				goto out_unlock;
-			}
+			if (IS_ERR(ct))
+				return PTR_ERR(ct);
+
 			err = 0;
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
 				events = IPCT_RELATED;
 			else
@@ -1545,23 +1543,19 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
 			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
+		}
 
 		return err;
 	}
 	/* implicit 'else' */
 
-	/* We manipulate the conntrack inside the global conntrack table lock,
-	 * so there's no need to increase the refcount */
 	err = -EEXIST;
+	ct = nf_ct_tuplehash_to_ctrack(h);
 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
-		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
-
+		spin_lock_bh(&nf_conntrack_lock);
 		err = ctnetlink_change_conntrack(ct, cda);
+		spin_unlock_bh(&nf_conntrack_lock);
 		if (err == 0) {
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
 						      (1 << IPCT_ASSURED) |
 						      (1 << IPCT_HELPER) |
@@ -1570,15 +1564,10 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      (1 << IPCT_MARK),
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
-			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
-
-		return err;
+		}
 	}
 
-out_unlock:
-	spin_unlock_bh(&nf_conntrack_lock);
+	nf_ct_put(ct);
 	return err;
 }
 
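Note: the conntrack_netlink rework replaces the unreferenced __nf_conntrack_find() lookup held under nf_conntrack_lock with nf_conntrack_find_get(), which hands back the entry with a reference already taken, so the lock can be dropped before the entry is changed and every exit path ends with a single nf_ct_put(). A generic sketch of that lookup-with-reference pattern (hypothetical userspace types, not the conntrack API):

#include <pthread.h>
#include <stdlib.h>

struct entry {
	int refcnt;			/* protected by table_lock */
	int key;
	struct entry *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table_head;

/* Lookup that returns the entry with a reference taken, so the caller
 * may drop table_lock and still use it; released via entry_put(). */
static struct entry *entry_find_get(int key)
{
	struct entry *e;

	pthread_mutex_lock(&table_lock);
	for (e = table_head; e != NULL; e = e->next) {
		if (e->key == key) {
			e->refcnt++;
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return e;
}

static void entry_put(struct entry *e)
{
	pthread_mutex_lock(&table_lock);
	if (--e->refcnt == 0)
		free(e);
	pthread_mutex_unlock(&table_lock);
}

Compared with holding the table lock across the whole operation, the reference pins the entry while the ctnetlink_change_conntrack()-style update runs with the lock dropped, and the final put is the single release point.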
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index b3a7db678b8d..ce60cf0f6c11 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -203,6 +203,27 @@ err:
 	return status;
 }
 
+#ifdef CONFIG_BRIDGE_NETFILTER
+/* When called from bridge netfilter, skb->data must point to MAC header
+ * before calling skb_gso_segment(). Else, original MAC header is lost
+ * and segmented skbs will be sent to wrong destination.
+ */
+static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_push(skb, skb->network_header - skb->mac_header);
+}
+
+static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_pull(skb, skb->network_header - skb->mac_header);
+}
+#else
+#define nf_bridge_adjust_skb_data(s) do {} while (0)
+#define nf_bridge_adjust_segmented_data(s) do {} while (0)
+#endif
+
 int nf_queue(struct sk_buff *skb,
 	     struct list_head *elem,
 	     u_int8_t pf, unsigned int hook,
@@ -212,7 +233,7 @@ int nf_queue(struct sk_buff *skb,
 	     unsigned int queuenum)
 {
 	struct sk_buff *segs;
-	int err;
+	int err = -EINVAL;
 	unsigned int queued;
 
 	if (!skb_is_gso(skb))
@@ -228,23 +249,25 @@ int nf_queue(struct sk_buff *skb,
 		break;
 	}
 
+	nf_bridge_adjust_skb_data(skb);
 	segs = skb_gso_segment(skb, 0);
 	/* Does not use PTR_ERR to limit the number of error codes that can be
 	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
 	 * 'ignore this hook'.
 	 */
 	if (IS_ERR(segs))
-		return -EINVAL;
-
+		goto out_err;
 	queued = 0;
 	err = 0;
 	do {
 		struct sk_buff *nskb = segs->next;
 
 		segs->next = NULL;
-		if (err == 0)
+		if (err == 0) {
+			nf_bridge_adjust_segmented_data(segs);
 			err = __nf_queue(segs, elem, pf, hook, indev,
 					 outdev, okfn, queuenum);
+		}
 		if (err == 0)
 			queued++;
 		else
@@ -252,11 +275,12 @@ int nf_queue(struct sk_buff *skb,
 		segs = nskb;
 	} while (segs);
 
-	/* also free orig skb if only some segments were queued */
-	if (unlikely(err && queued))
-		err = 0;
-	if (err == 0)
+	if (queued) {
 		kfree_skb(skb);
+		return 0;
+	}
+out_err:
+	nf_bridge_adjust_segmented_data(skb);
 	return err;
 }
 
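Note: the reworked nf_queue() now frees the original skb only when at least one GSO segment was actually queued, and on failure undoes the bridge-netfilter data adjustment before handing the packet back to the caller. A simplified standalone model of that control flow (toy stand-ins, not the kernel helpers):

#include <stdio.h>
#include <stdbool.h>

/* Toy queueing helper: pretend the segment with value 3 cannot be queued. */
static int queue_one(int seg)
{
	return seg == 3 ? -1 : 0;
}

/* Mirror nf_queue()'s rule for a segmented packet: once any segment has
 * been queued, the original is consumed and the call reports success;
 * otherwise the original is kept and the error is returned. */
static int queue_segments(const int *segs, int n, bool *orig_consumed)
{
	int queued = 0, err = 0, i;

	for (i = 0; i < n; i++) {
		if (err == 0)
			err = queue_one(segs[i]);
		if (err == 0)
			queued++;
		/* segments that were not queued would be freed here */
	}

	if (queued) {
		*orig_consumed = true;	/* kernel: kfree_skb(skb); return 0 */
		return 0;
	}
	*orig_consumed = false;		/* kernel: undo data adjust, return err */
	return err;
}

int main(void)
{
	int segs[] = { 1, 2, 3, 4 };
	bool consumed;
	int err = queue_segments(segs, 4, &consumed);

	printf("err=%d consumed=%d\n", err, (int)consumed);
	return 0;
}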