diff options
author | Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp> | 2007-07-08 01:25:51 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-07-11 01:17:35 -0400 |
commit | dacd2a1a5cf621288833aa3c6e815b86a1536538 (patch) | |
tree | c615b95aa1518c6518df35acd481ed4af19acd61 /net | |
parent | ff09b7493c8f433d3ffd6a31ad58d190f82ef0c5 (diff) |
[NETFILTER]: nf_conntrack: remove old memory allocator of conntrack
Now the memory space for helper and NAT data is allocated by the
extension infrastructure.
Signed-off-by: Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 6 | ||||
-rw-r--r-- | net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 6 | ||||
-rw-r--r-- | net/netfilter/nf_conntrack_core.c | 222 | ||||
-rw-r--r-- | net/netfilter/nf_conntrack_l3proto_generic.c | 7 |
4 files changed, 15 insertions, 226 deletions
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 7411dd16d779..129a8cccf4a1 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -103,11 +103,6 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, | |||
103 | return NF_ACCEPT; | 103 | return NF_ACCEPT; |
104 | } | 104 | } |
105 | 105 | ||
106 | static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple) | ||
107 | { | ||
108 | return NF_CT_F_BASIC; | ||
109 | } | ||
110 | |||
111 | static unsigned int ipv4_confirm(unsigned int hooknum, | 106 | static unsigned int ipv4_confirm(unsigned int hooknum, |
112 | struct sk_buff **pskb, | 107 | struct sk_buff **pskb, |
113 | const struct net_device *in, | 108 | const struct net_device *in, |
@@ -419,7 +414,6 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { | |||
419 | .print_tuple = ipv4_print_tuple, | 414 | .print_tuple = ipv4_print_tuple, |
420 | .print_conntrack = ipv4_print_conntrack, | 415 | .print_conntrack = ipv4_print_conntrack, |
421 | .prepare = ipv4_prepare, | 416 | .prepare = ipv4_prepare, |
422 | .get_features = ipv4_get_features, | ||
423 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 417 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
424 | .tuple_to_nfattr = ipv4_tuple_to_nfattr, | 418 | .tuple_to_nfattr = ipv4_tuple_to_nfattr, |
425 | .nfattr_to_tuple = ipv4_nfattr_to_tuple, | 419 | .nfattr_to_tuple = ipv4_nfattr_to_tuple, |
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 1b1797f1f33d..747b01e53132 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -147,11 +147,6 @@ ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, | |||
147 | return NF_ACCEPT; | 147 | return NF_ACCEPT; |
148 | } | 148 | } |
149 | 149 | ||
150 | static u_int32_t ipv6_get_features(const struct nf_conntrack_tuple *tuple) | ||
151 | { | ||
152 | return NF_CT_F_BASIC; | ||
153 | } | ||
154 | |||
155 | static unsigned int ipv6_confirm(unsigned int hooknum, | 150 | static unsigned int ipv6_confirm(unsigned int hooknum, |
156 | struct sk_buff **pskb, | 151 | struct sk_buff **pskb, |
157 | const struct net_device *in, | 152 | const struct net_device *in, |
@@ -397,7 +392,6 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = { | |||
397 | .ctl_table_path = nf_net_netfilter_sysctl_path, | 392 | .ctl_table_path = nf_net_netfilter_sysctl_path, |
398 | .ctl_table = nf_ct_ipv6_sysctl_table, | 393 | .ctl_table = nf_ct_ipv6_sysctl_table, |
399 | #endif | 394 | #endif |
400 | .get_features = ipv6_get_features, | ||
401 | .me = THIS_MODULE, | 395 | .me = THIS_MODULE, |
402 | }; | 396 | }; |
403 | 397 | ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 914506e6c787..a71366652938 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -71,39 +71,12 @@ EXPORT_SYMBOL_GPL(nf_conntrack_untracked); | |||
71 | unsigned int nf_ct_log_invalid __read_mostly; | 71 | unsigned int nf_ct_log_invalid __read_mostly; |
72 | LIST_HEAD(unconfirmed); | 72 | LIST_HEAD(unconfirmed); |
73 | static int nf_conntrack_vmalloc __read_mostly; | 73 | static int nf_conntrack_vmalloc __read_mostly; |
74 | 74 | static struct kmem_cache *nf_conntrack_cachep __read_mostly; | |
75 | static unsigned int nf_conntrack_next_id; | 75 | static unsigned int nf_conntrack_next_id; |
76 | 76 | ||
77 | DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); | 77 | DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); |
78 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat); | 78 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat); |
79 | 79 | ||
80 | /* | ||
81 | * This scheme offers various size of "struct nf_conn" dependent on | ||
82 | * features(helper, nat, ...) | ||
83 | */ | ||
84 | |||
85 | #define NF_CT_FEATURES_NAMELEN 256 | ||
86 | static struct { | ||
87 | /* name of slab cache. printed in /proc/slabinfo */ | ||
88 | char *name; | ||
89 | |||
90 | /* size of slab cache */ | ||
91 | size_t size; | ||
92 | |||
93 | /* slab cache pointer */ | ||
94 | struct kmem_cache *cachep; | ||
95 | |||
96 | /* allocated slab cache + modules which uses this slab cache */ | ||
97 | int use; | ||
98 | |||
99 | } nf_ct_cache[NF_CT_F_NUM]; | ||
100 | |||
101 | /* protect members of nf_ct_cache except of "use" */ | ||
102 | DEFINE_RWLOCK(nf_ct_cache_lock); | ||
103 | |||
104 | /* This avoids calling kmem_cache_create() with same name simultaneously */ | ||
105 | static DEFINE_MUTEX(nf_ct_cache_mutex); | ||
106 | |||
107 | static int nf_conntrack_hash_rnd_initted; | 80 | static int nf_conntrack_hash_rnd_initted; |
108 | static unsigned int nf_conntrack_hash_rnd; | 81 | static unsigned int nf_conntrack_hash_rnd; |
109 | 82 | ||
@@ -126,122 +99,6 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) | |||
126 | nf_conntrack_hash_rnd); | 99 | nf_conntrack_hash_rnd); |
127 | } | 100 | } |
128 | 101 | ||
129 | int nf_conntrack_register_cache(u_int32_t features, const char *name, | ||
130 | size_t size) | ||
131 | { | ||
132 | int ret = 0; | ||
133 | char *cache_name; | ||
134 | struct kmem_cache *cachep; | ||
135 | |||
136 | DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", | ||
137 | features, name, size); | ||
138 | |||
139 | if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) { | ||
140 | DEBUGP("nf_conntrack_register_cache: invalid features.: 0x%x\n", | ||
141 | features); | ||
142 | return -EINVAL; | ||
143 | } | ||
144 | |||
145 | mutex_lock(&nf_ct_cache_mutex); | ||
146 | |||
147 | write_lock_bh(&nf_ct_cache_lock); | ||
148 | /* e.g: multiple helpers are loaded */ | ||
149 | if (nf_ct_cache[features].use > 0) { | ||
150 | DEBUGP("nf_conntrack_register_cache: already resisterd.\n"); | ||
151 | if ((!strncmp(nf_ct_cache[features].name, name, | ||
152 | NF_CT_FEATURES_NAMELEN)) | ||
153 | && nf_ct_cache[features].size == size) { | ||
154 | DEBUGP("nf_conntrack_register_cache: reusing.\n"); | ||
155 | nf_ct_cache[features].use++; | ||
156 | ret = 0; | ||
157 | } else | ||
158 | ret = -EBUSY; | ||
159 | |||
160 | write_unlock_bh(&nf_ct_cache_lock); | ||
161 | mutex_unlock(&nf_ct_cache_mutex); | ||
162 | return ret; | ||
163 | } | ||
164 | write_unlock_bh(&nf_ct_cache_lock); | ||
165 | |||
166 | /* | ||
167 | * The memory space for name of slab cache must be alive until | ||
168 | * cache is destroyed. | ||
169 | */ | ||
170 | cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC); | ||
171 | if (cache_name == NULL) { | ||
172 | DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n"); | ||
173 | ret = -ENOMEM; | ||
174 | goto out_up_mutex; | ||
175 | } | ||
176 | |||
177 | if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN) | ||
178 | >= NF_CT_FEATURES_NAMELEN) { | ||
179 | printk("nf_conntrack_register_cache: name too long\n"); | ||
180 | ret = -EINVAL; | ||
181 | goto out_free_name; | ||
182 | } | ||
183 | |||
184 | cachep = kmem_cache_create(cache_name, size, 0, 0, | ||
185 | NULL, NULL); | ||
186 | if (!cachep) { | ||
187 | printk("nf_conntrack_register_cache: Can't create slab cache " | ||
188 | "for the features = 0x%x\n", features); | ||
189 | ret = -ENOMEM; | ||
190 | goto out_free_name; | ||
191 | } | ||
192 | |||
193 | write_lock_bh(&nf_ct_cache_lock); | ||
194 | nf_ct_cache[features].use = 1; | ||
195 | nf_ct_cache[features].size = size; | ||
196 | nf_ct_cache[features].cachep = cachep; | ||
197 | nf_ct_cache[features].name = cache_name; | ||
198 | write_unlock_bh(&nf_ct_cache_lock); | ||
199 | |||
200 | goto out_up_mutex; | ||
201 | |||
202 | out_free_name: | ||
203 | kfree(cache_name); | ||
204 | out_up_mutex: | ||
205 | mutex_unlock(&nf_ct_cache_mutex); | ||
206 | return ret; | ||
207 | } | ||
208 | EXPORT_SYMBOL_GPL(nf_conntrack_register_cache); | ||
209 | |||
210 | /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */ | ||
211 | void nf_conntrack_unregister_cache(u_int32_t features) | ||
212 | { | ||
213 | struct kmem_cache *cachep; | ||
214 | char *name; | ||
215 | |||
216 | /* | ||
217 | * This assures that kmem_cache_create() isn't called before destroying | ||
218 | * slab cache. | ||
219 | */ | ||
220 | DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features); | ||
221 | mutex_lock(&nf_ct_cache_mutex); | ||
222 | |||
223 | write_lock_bh(&nf_ct_cache_lock); | ||
224 | if (--nf_ct_cache[features].use > 0) { | ||
225 | write_unlock_bh(&nf_ct_cache_lock); | ||
226 | mutex_unlock(&nf_ct_cache_mutex); | ||
227 | return; | ||
228 | } | ||
229 | cachep = nf_ct_cache[features].cachep; | ||
230 | name = nf_ct_cache[features].name; | ||
231 | nf_ct_cache[features].cachep = NULL; | ||
232 | nf_ct_cache[features].name = NULL; | ||
233 | nf_ct_cache[features].size = 0; | ||
234 | write_unlock_bh(&nf_ct_cache_lock); | ||
235 | |||
236 | synchronize_net(); | ||
237 | |||
238 | kmem_cache_destroy(cachep); | ||
239 | kfree(name); | ||
240 | |||
241 | mutex_unlock(&nf_ct_cache_mutex); | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(nf_conntrack_unregister_cache); | ||
244 | |||
245 | int | 102 | int |
246 | nf_ct_get_tuple(const struct sk_buff *skb, | 103 | nf_ct_get_tuple(const struct sk_buff *skb, |
247 | unsigned int nhoff, | 104 | unsigned int nhoff, |
@@ -559,11 +416,8 @@ static int early_drop(struct list_head *chain) | |||
559 | return dropped; | 416 | return dropped; |
560 | } | 417 | } |
561 | 418 | ||
562 | static struct nf_conn * | 419 | struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, |
563 | __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | 420 | const struct nf_conntrack_tuple *repl) |
564 | const struct nf_conntrack_tuple *repl, | ||
565 | const struct nf_conntrack_l3proto *l3proto, | ||
566 | u_int32_t features) | ||
567 | { | 421 | { |
568 | struct nf_conn *conntrack = NULL; | 422 | struct nf_conn *conntrack = NULL; |
569 | 423 | ||
@@ -589,65 +443,28 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | |||
589 | } | 443 | } |
590 | } | 444 | } |
591 | 445 | ||
592 | /* find features needed by this conntrack. */ | 446 | conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC); |
593 | features |= l3proto->get_features(orig); | ||
594 | |||
595 | DEBUGP("nf_conntrack_alloc: features=0x%x\n", features); | ||
596 | |||
597 | read_lock_bh(&nf_ct_cache_lock); | ||
598 | |||
599 | if (unlikely(!nf_ct_cache[features].use)) { | ||
600 | DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n", | ||
601 | features); | ||
602 | goto out; | ||
603 | } | ||
604 | |||
605 | conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC); | ||
606 | if (conntrack == NULL) { | 447 | if (conntrack == NULL) { |
607 | DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n"); | 448 | DEBUGP("nf_conntrack_alloc: Can't alloc conntrack.\n"); |
608 | goto out; | 449 | atomic_dec(&nf_conntrack_count); |
450 | return ERR_PTR(-ENOMEM); | ||
609 | } | 451 | } |
610 | 452 | ||
611 | memset(conntrack, 0, nf_ct_cache[features].size); | ||
612 | conntrack->features = features; | ||
613 | atomic_set(&conntrack->ct_general.use, 1); | 453 | atomic_set(&conntrack->ct_general.use, 1); |
614 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; | 454 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; |
615 | conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; | 455 | conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl; |
616 | /* Don't set timer yet: wait for confirmation */ | 456 | /* Don't set timer yet: wait for confirmation */ |
617 | setup_timer(&conntrack->timeout, death_by_timeout, | 457 | setup_timer(&conntrack->timeout, death_by_timeout, |
618 | (unsigned long)conntrack); | 458 | (unsigned long)conntrack); |
619 | read_unlock_bh(&nf_ct_cache_lock); | ||
620 | 459 | ||
621 | return conntrack; | 460 | return conntrack; |
622 | out: | ||
623 | read_unlock_bh(&nf_ct_cache_lock); | ||
624 | atomic_dec(&nf_conntrack_count); | ||
625 | return conntrack; | ||
626 | } | ||
627 | |||
628 | struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | ||
629 | const struct nf_conntrack_tuple *repl) | ||
630 | { | ||
631 | struct nf_conntrack_l3proto *l3proto; | ||
632 | struct nf_conn *ct; | ||
633 | |||
634 | rcu_read_lock(); | ||
635 | l3proto = __nf_ct_l3proto_find(orig->src.l3num); | ||
636 | ct = __nf_conntrack_alloc(orig, repl, l3proto, 0); | ||
637 | rcu_read_unlock(); | ||
638 | |||
639 | return ct; | ||
640 | } | 461 | } |
641 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); | 462 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); |
642 | 463 | ||
643 | void nf_conntrack_free(struct nf_conn *conntrack) | 464 | void nf_conntrack_free(struct nf_conn *conntrack) |
644 | { | 465 | { |
645 | u_int32_t features = conntrack->features; | ||
646 | NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM); | ||
647 | nf_ct_ext_free(conntrack); | 466 | nf_ct_ext_free(conntrack); |
648 | DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features, | 467 | kmem_cache_free(nf_conntrack_cachep, conntrack); |
649 | conntrack); | ||
650 | kmem_cache_free(nf_ct_cache[features].cachep, conntrack); | ||
651 | atomic_dec(&nf_conntrack_count); | 468 | atomic_dec(&nf_conntrack_count); |
652 | } | 469 | } |
653 | EXPORT_SYMBOL_GPL(nf_conntrack_free); | 470 | EXPORT_SYMBOL_GPL(nf_conntrack_free); |
@@ -665,14 +482,13 @@ init_conntrack(const struct nf_conntrack_tuple *tuple, | |||
665 | struct nf_conn_help *help; | 482 | struct nf_conn_help *help; |
666 | struct nf_conntrack_tuple repl_tuple; | 483 | struct nf_conntrack_tuple repl_tuple; |
667 | struct nf_conntrack_expect *exp; | 484 | struct nf_conntrack_expect *exp; |
668 | u_int32_t features = 0; | ||
669 | 485 | ||
670 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { | 486 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { |
671 | DEBUGP("Can't invert tuple.\n"); | 487 | DEBUGP("Can't invert tuple.\n"); |
672 | return NULL; | 488 | return NULL; |
673 | } | 489 | } |
674 | 490 | ||
675 | conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, features); | 491 | conntrack = nf_conntrack_alloc(tuple, &repl_tuple); |
676 | if (conntrack == NULL || IS_ERR(conntrack)) { | 492 | if (conntrack == NULL || IS_ERR(conntrack)) { |
677 | DEBUGP("Can't allocate conntrack.\n"); | 493 | DEBUGP("Can't allocate conntrack.\n"); |
678 | return (struct nf_conntrack_tuple_hash *)conntrack; | 494 | return (struct nf_conntrack_tuple_hash *)conntrack; |
@@ -1128,8 +944,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_flush); | |||
1128 | supposed to kill the mall. */ | 944 | supposed to kill the mall. */ |
1129 | void nf_conntrack_cleanup(void) | 945 | void nf_conntrack_cleanup(void) |
1130 | { | 946 | { |
1131 | int i; | ||
1132 | |||
1133 | rcu_assign_pointer(ip_ct_attach, NULL); | 947 | rcu_assign_pointer(ip_ct_attach, NULL); |
1134 | 948 | ||
1135 | /* This makes sure all current packets have passed through | 949 | /* This makes sure all current packets have passed through |
@@ -1150,14 +964,7 @@ void nf_conntrack_cleanup(void) | |||
1150 | 964 | ||
1151 | rcu_assign_pointer(nf_ct_destroy, NULL); | 965 | rcu_assign_pointer(nf_ct_destroy, NULL); |
1152 | 966 | ||
1153 | for (i = 0; i < NF_CT_F_NUM; i++) { | 967 | kmem_cache_destroy(nf_conntrack_cachep); |
1154 | if (nf_ct_cache[i].use == 0) | ||
1155 | continue; | ||
1156 | |||
1157 | NF_CT_ASSERT(nf_ct_cache[i].use == 1); | ||
1158 | nf_ct_cache[i].use = 1; | ||
1159 | nf_conntrack_unregister_cache(i); | ||
1160 | } | ||
1161 | kmem_cache_destroy(nf_conntrack_expect_cachep); | 968 | kmem_cache_destroy(nf_conntrack_expect_cachep); |
1162 | free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc, | 969 | free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc, |
1163 | nf_conntrack_htable_size); | 970 | nf_conntrack_htable_size); |
@@ -1267,9 +1074,10 @@ int __init nf_conntrack_init(void) | |||
1267 | goto err_out; | 1074 | goto err_out; |
1268 | } | 1075 | } |
1269 | 1076 | ||
1270 | ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic", | 1077 | nf_conntrack_cachep = kmem_cache_create("nf_conntrack", |
1271 | sizeof(struct nf_conn)); | 1078 | sizeof(struct nf_conn), |
1272 | if (ret < 0) { | 1079 | 0, 0, NULL, NULL); |
1080 | if (!nf_conntrack_cachep) { | ||
1273 | printk(KERN_ERR "Unable to create nf_conn slab cache\n"); | 1081 | printk(KERN_ERR "Unable to create nf_conn slab cache\n"); |
1274 | goto err_free_hash; | 1082 | goto err_free_hash; |
1275 | } | 1083 | } |
@@ -1307,7 +1115,7 @@ out_fini_proto: | |||
1307 | out_free_expect_slab: | 1115 | out_free_expect_slab: |
1308 | kmem_cache_destroy(nf_conntrack_expect_cachep); | 1116 | kmem_cache_destroy(nf_conntrack_expect_cachep); |
1309 | err_free_conntrack_slab: | 1117 | err_free_conntrack_slab: |
1310 | nf_conntrack_unregister_cache(NF_CT_F_BASIC); | 1118 | kmem_cache_destroy(nf_conntrack_cachep); |
1311 | err_free_hash: | 1119 | err_free_hash: |
1312 | free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc, | 1120 | free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc, |
1313 | nf_conntrack_htable_size); | 1121 | nf_conntrack_htable_size); |
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c index cbd96f3c1b89..2fd0f11b8fb2 100644 --- a/net/netfilter/nf_conntrack_l3proto_generic.c +++ b/net/netfilter/nf_conntrack_l3proto_generic.c | |||
@@ -76,12 +76,6 @@ generic_prepare(struct sk_buff **pskb, unsigned int hooknum, | |||
76 | } | 76 | } |
77 | 77 | ||
78 | 78 | ||
79 | static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple) | ||
80 | |||
81 | { | ||
82 | return NF_CT_F_BASIC; | ||
83 | } | ||
84 | |||
85 | struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = { | 79 | struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = { |
86 | .l3proto = PF_UNSPEC, | 80 | .l3proto = PF_UNSPEC, |
87 | .name = "unknown", | 81 | .name = "unknown", |
@@ -90,6 +84,5 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = { | |||
90 | .print_tuple = generic_print_tuple, | 84 | .print_tuple = generic_print_tuple, |
91 | .print_conntrack = generic_print_conntrack, | 85 | .print_conntrack = generic_print_conntrack, |
92 | .prepare = generic_prepare, | 86 | .prepare = generic_prepare, |
93 | .get_features = generic_get_features, | ||
94 | }; | 87 | }; |
95 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); | 88 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); |