author     Eric Dumazet <dada1@cosmosbay.com>    2005-12-14 02:13:48 -0500
committer  David S. Miller <davem@sunset.davemloft.net>    2006-01-03 16:10:29 -0500
commit     318360646941d6f3d4c6e4ee99107392728a4079 (patch)
tree       26ab4ddc68f917dd4e8813ace504956620eba3a8 /net/ipv6
parent     df3271f3361b61ce02da0026b4a53e63bc2720cb (diff)
[NETFILTER] ip_tables: NUMA-aware allocation
Part of the performance problem with ip_tables is that memory allocation is not NUMA aware, but 'only' SMP aware (i.e. each CPU normally touches separate cache lines). Even with small iptables rule sets, the cost of this misplacement can be high on common workloads.

Instead of using one vmalloc() area (located in the node of the iptables process), we now allocate an area for each possible CPU, using vmalloc_node() so that memory should be allocated on the CPU's node if possible.

Port to arp_tables and ip6_tables by Harald Welte.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c  190
1 file changed, 126 insertions, 64 deletions
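For orientation before the diff: a minimal, illustrative sketch (not part of the patch itself) of the per-CPU, NUMA-aware allocation pattern the commit introduces. It mirrors the new alloc_table_info()/free_table_info() helpers added below, simplified for illustration and using the 2.6.15-era for_each_cpu() iterator that the patch itself uses.

    #include <linux/slab.h>      /* kzalloc, kmalloc_node, kfree */
    #include <linux/vmalloc.h>   /* vmalloc_node, vfree */
    #include <linux/cpumask.h>   /* for_each_cpu (possible CPUs, 2.6.15 API) */
    #include <linux/topology.h>  /* cpu_to_node */

    /* Sketch: give every possible CPU its own copy of the rule blob,
     * allocated on that CPU's home NUMA node.  Small blobs come from the
     * slab allocator, large ones from vmalloc_node(). */
    static struct ip6t_table_info *alloc_table_info(unsigned int size)
    {
            struct ip6t_table_info *newinfo;
            int cpu;

            newinfo = kzalloc(sizeof(*newinfo), GFP_KERNEL);
            if (!newinfo)
                    return NULL;
            newinfo->size = size;

            for_each_cpu(cpu) {
                    if (size <= PAGE_SIZE)
                            newinfo->entries[cpu] = kmalloc_node(size, GFP_KERNEL,
                                                                 cpu_to_node(cpu));
                    else
                            newinfo->entries[cpu] = vmalloc_node(size,
                                                                 cpu_to_node(cpu));
                    if (!newinfo->entries[cpu]) {
                            free_table_info(newinfo); /* frees partial allocations */
                            return NULL;
                    }
            }
            return newinfo;
    }

    /* The packet path then simply indexes the local copy instead of
     * computing a TABLE_OFFSET() into one shared area:
     *      table_base = table->private->entries[smp_processor_id()];
     */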
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 95d469271c4d..dd80020d8740 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -86,11 +86,6 @@ static DECLARE_MUTEX(ip6t_mutex);
    context stops packets coming through and allows user context to read
    the counters or update the rules.

-   To be cache friendly on SMP, we arrange them like so:
-   [ n-entries ]
-   ... cache-align padding ...
-   [ n-entries ]
-
    Hence the start of any table is given by get_table() below.  */

 /* The table itself */
@@ -108,20 +103,15 @@ struct ip6t_table_info
 	unsigned int underflow[NF_IP6_NUMHOOKS];

 	/* ip6t_entry tables: one per CPU */
-	char entries[0] ____cacheline_aligned;
+	void *entries[NR_CPUS];
 };

 static LIST_HEAD(ip6t_target);
 static LIST_HEAD(ip6t_match);
 static LIST_HEAD(ip6t_tables);
+#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
 #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)

-#ifdef CONFIG_SMP
-#define TABLE_OFFSET(t,p) (SMP_ALIGN((t)->size)*(p))
-#else
-#define TABLE_OFFSET(t,p) 0
-#endif
-
 #if 0
 #define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
 #define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
@@ -376,8 +366,7 @@ ip6t_do_table(struct sk_buff **pskb,

 	read_lock_bh(&table->lock);
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-	table_base = (void *)table->private->entries
-		+ TABLE_OFFSET(table->private, smp_processor_id());
+	table_base = (void *)table->private->entries[smp_processor_id()];
 	e = get_entry(table_base, table->private->hook_entry[hook]);

 #ifdef CONFIG_NETFILTER_DEBUG
@@ -649,7 +638,8 @@ unconditional(const struct ip6t_ip6 *ipv6)
 /* Figures out from what hook each rule can be called: returns 0 if
    there are loops.  Puts hook bitmask in comefrom. */
 static int
-mark_source_chains(struct ip6t_table_info *newinfo, unsigned int valid_hooks)
+mark_source_chains(struct ip6t_table_info *newinfo,
+		   unsigned int valid_hooks, void *entry0)
 {
 	unsigned int hook;

@@ -658,7 +648,7 @@ mark_source_chains(struct ip6t_table_info *newinfo, unsigned int valid_hooks)
 	for (hook = 0; hook < NF_IP6_NUMHOOKS; hook++) {
 		unsigned int pos = newinfo->hook_entry[hook];
 		struct ip6t_entry *e
-			= (struct ip6t_entry *)(newinfo->entries + pos);
+			= (struct ip6t_entry *)(entry0 + pos);

 		if (!(valid_hooks & (1 << hook)))
 			continue;
@@ -708,13 +698,13 @@ mark_source_chains(struct ip6t_table_info *newinfo, unsigned int valid_hooks)
 					goto next;

 				e = (struct ip6t_entry *)
-					(newinfo->entries + pos);
+					(entry0 + pos);
 			} while (oldpos == pos + e->next_offset);

 			/* Move along one */
 			size = e->next_offset;
 			e = (struct ip6t_entry *)
-				(newinfo->entries + pos + size);
+				(entry0 + pos + size);
 			e->counters.pcnt = pos;
 			pos += size;
 		} else {
@@ -731,7 +721,7 @@ mark_source_chains(struct ip6t_table_info *newinfo, unsigned int valid_hooks)
 				newpos = pos + e->next_offset;
 			}
 			e = (struct ip6t_entry *)
-				(newinfo->entries + newpos);
+				(entry0 + newpos);
 			e->counters.pcnt = pos;
 			pos = newpos;
 		}
@@ -941,6 +931,7 @@ static int
 translate_table(const char *name,
 		unsigned int valid_hooks,
 		struct ip6t_table_info *newinfo,
+		void *entry0,
 		unsigned int size,
 		unsigned int number,
 		const unsigned int *hook_entries,
@@ -961,11 +952,11 @@ translate_table(const char *name,
 	duprintf("translate_table: size %u\n", newinfo->size);
 	i = 0;
 	/* Walk through entries, checking offsets. */
-	ret = IP6T_ENTRY_ITERATE(newinfo->entries, newinfo->size,
+	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
 				check_entry_size_and_hooks,
 				newinfo,
-				newinfo->entries,
-				newinfo->entries + size,
+				entry0,
+				entry0 + size,
 				hook_entries, underflows, &i);
 	if (ret != 0)
 		return ret;
@@ -993,27 +984,24 @@ translate_table(const char *name,
 		}
 	}

-	if (!mark_source_chains(newinfo, valid_hooks))
+	if (!mark_source_chains(newinfo, valid_hooks, entry0))
 		return -ELOOP;

 	/* Finally, each sanity check must pass */
 	i = 0;
-	ret = IP6T_ENTRY_ITERATE(newinfo->entries, newinfo->size,
+	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
 				check_entry, name, size, &i);

 	if (ret != 0) {
-		IP6T_ENTRY_ITERATE(newinfo->entries, newinfo->size,
+		IP6T_ENTRY_ITERATE(entry0, newinfo->size,
 				  cleanup_entry, &i);
 		return ret;
 	}

 	/* And one copy for every other CPU */
 	for_each_cpu(i) {
-		if (i == 0)
-			continue;
-		memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
-		       newinfo->entries,
-		       SMP_ALIGN(newinfo->size));
+		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
+			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}

 	return ret;
@@ -1029,15 +1017,12 @@ replace_table(struct ip6t_table *table,

 #ifdef CONFIG_NETFILTER_DEBUG
 	{
-		struct ip6t_entry *table_base;
-		unsigned int i;
+		int cpu;

-		for_each_cpu(i) {
-			table_base =
-				(void *)newinfo->entries
-				+ TABLE_OFFSET(newinfo, i);
-
-			table_base->comefrom = 0xdead57ac;
+		for_each_cpu(cpu) {
+			struct ip6t_entry *table_base = newinfo->entries[cpu];
+			if (table_base)
+				table_base->comefrom = 0xdead57ac;
 		}
 	}
 #endif
@@ -1072,16 +1057,44 @@ add_entry_to_counter(const struct ip6t_entry *e,
 	return 0;
 }

+static inline int
+set_entry_to_counter(const struct ip6t_entry *e,
+		     struct ip6t_counters total[],
+		     unsigned int *i)
+{
+	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
+
+	(*i)++;
+	return 0;
+}
+
 static void
 get_counters(const struct ip6t_table_info *t,
 	     struct ip6t_counters counters[])
 {
 	unsigned int cpu;
 	unsigned int i;
+	unsigned int curcpu;
+
+	/* Instead of clearing (by a previous call to memset())
+	 * the counters and using adds, we set the counters
+	 * with data used by 'current' CPU
+	 * We dont care about preemption here.
+	 */
+	curcpu = raw_smp_processor_id();
+
+	i = 0;
+	IP6T_ENTRY_ITERATE(t->entries[curcpu],
+			   t->size,
+			   set_entry_to_counter,
+			   counters,
+			   &i);

 	for_each_cpu(cpu) {
+		if (cpu == curcpu)
+			continue;
 		i = 0;
-		IP6T_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
+		IP6T_ENTRY_ITERATE(t->entries[cpu],
 				   t->size,
 				   add_entry_to_counter,
 				   counters,
@@ -1098,6 +1111,7 @@ copy_entries_to_user(unsigned int total_size,
 	struct ip6t_entry *e;
 	struct ip6t_counters *counters;
 	int ret = 0;
+	void *loc_cpu_entry;

 	/* We need atomic snapshot of counters: rest doesn't change
 	   (other than comefrom, which userspace doesn't care
@@ -1109,13 +1123,13 @@ copy_entries_to_user(unsigned int total_size,
 		return -ENOMEM;

 	/* First, sum counters... */
-	memset(counters, 0, countersize);
 	write_lock_bh(&table->lock);
 	get_counters(table->private, counters);
 	write_unlock_bh(&table->lock);

-	/* ... then copy entire thing from CPU 0... */
-	if (copy_to_user(userptr, table->private->entries, total_size) != 0) {
+	/* choose the copy that is on our node/cpu */
+	loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
+	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
 		ret = -EFAULT;
 		goto free_counters;
 	}
@@ -1127,7 +1141,7 @@ copy_entries_to_user(unsigned int total_size,
 		struct ip6t_entry_match *m;
 		struct ip6t_entry_target *t;

-		e = (struct ip6t_entry *)(table->private->entries + off);
+		e = (struct ip6t_entry *)(loc_cpu_entry + off);
 		if (copy_to_user(userptr + off
 				 + offsetof(struct ip6t_entry, counters),
 				 &counters[num],
@@ -1196,6 +1210,46 @@ get_entries(const struct ip6t_get_entries *entries,
 	return ret;
 }

+static void free_table_info(struct ip6t_table_info *info)
+{
+	int cpu;
+	for_each_cpu(cpu) {
+		if (info->size <= PAGE_SIZE)
+			kfree(info->entries[cpu]);
+		else
+			vfree(info->entries[cpu]);
+	}
+	kfree(info);
+}
+
+static struct ip6t_table_info *alloc_table_info(unsigned int size)
+{
+	struct ip6t_table_info *newinfo;
+	int cpu;
+
+	newinfo = kzalloc(sizeof(struct ip6t_table_info), GFP_KERNEL);
+	if (!newinfo)
+		return NULL;
+
+	newinfo->size = size;
+
+	for_each_cpu(cpu) {
+		if (size <= PAGE_SIZE)
+			newinfo->entries[cpu] = kmalloc_node(size,
+							     GFP_KERNEL,
+							     cpu_to_node(cpu));
+		else
+			newinfo->entries[cpu] = vmalloc_node(size,
+							     cpu_to_node(cpu));
+		if (newinfo->entries[cpu] == NULL) {
+			free_table_info(newinfo);
+			return NULL;
+		}
+	}
+
+	return newinfo;
+}
+
 static int
 do_replace(void __user *user, unsigned int len)
 {
@@ -1204,6 +1258,7 @@ do_replace(void __user *user, unsigned int len)
 	struct ip6t_table *t;
 	struct ip6t_table_info *newinfo, *oldinfo;
 	struct ip6t_counters *counters;
+	void *loc_cpu_entry, *loc_cpu_old_entry;

 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
 		return -EFAULT;
@@ -1212,13 +1267,13 @@ do_replace(void __user *user, unsigned int len)
 	if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
 		return -ENOMEM;

-	newinfo = vmalloc(sizeof(struct ip6t_table_info)
-			  + SMP_ALIGN(tmp.size) *
-				(highest_possible_processor_id()+1));
+	newinfo = alloc_table_info(tmp.size);
 	if (!newinfo)
 		return -ENOMEM;

-	if (copy_from_user(newinfo->entries, user + sizeof(tmp),
+	/* choose the copy that is on our node/cpu */
+	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
 			   tmp.size) != 0) {
 		ret = -EFAULT;
 		goto free_newinfo;
@@ -1229,10 +1284,9 @@ do_replace(void __user *user, unsigned int len)
 		ret = -ENOMEM;
 		goto free_newinfo;
 	}
-	memset(counters, 0, tmp.num_counters * sizeof(struct ip6t_counters));

 	ret = translate_table(tmp.name, tmp.valid_hooks,
-			      newinfo, tmp.size, tmp.num_entries,
+			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
 			      tmp.hook_entry, tmp.underflow);
 	if (ret != 0)
 		goto free_newinfo_counters;
@@ -1271,8 +1325,9 @@ do_replace(void __user *user, unsigned int len)
 	/* Get the old counters. */
 	get_counters(oldinfo, counters);
 	/* Decrease module usage counts and free resource */
-	IP6T_ENTRY_ITERATE(oldinfo->entries, oldinfo->size, cleanup_entry,NULL);
-	vfree(oldinfo);
+	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
+	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
+	free_table_info(oldinfo);
 	if (copy_to_user(tmp.counters, counters,
 			 sizeof(struct ip6t_counters) * tmp.num_counters) != 0)
 		ret = -EFAULT;
@@ -1284,11 +1339,11 @@ do_replace(void __user *user, unsigned int len)
 	module_put(t->me);
 	up(&ip6t_mutex);
  free_newinfo_counters_untrans:
-	IP6T_ENTRY_ITERATE(newinfo->entries, newinfo->size, cleanup_entry,NULL);
+	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
  free_newinfo_counters:
 	vfree(counters);
  free_newinfo:
-	vfree(newinfo);
+	free_table_info(newinfo);
 	return ret;
 }

@@ -1321,6 +1376,7 @@ do_add_counters(void __user *user, unsigned int len)
 	struct ip6t_counters_info tmp, *paddc;
 	struct ip6t_table *t;
 	int ret = 0;
+	void *loc_cpu_entry;

 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
 		return -EFAULT;
@@ -1350,7 +1406,9 @@ do_add_counters(void __user *user, unsigned int len)
 	}

 	i = 0;
-	IP6T_ENTRY_ITERATE(t->private->entries,
+	/* Choose the copy that is on our node */
+	loc_cpu_entry = t->private->entries[smp_processor_id()];
+	IP6T_ENTRY_ITERATE(loc_cpu_entry,
 			   t->private->size,
 			   add_counter_to_entry,
 			   paddc->counters,
@@ -1543,28 +1601,29 @@ int ip6t_register_table(struct ip6t_table *table,
 	struct ip6t_table_info *newinfo;
 	static struct ip6t_table_info bootstrap
 		= { 0, 0, 0, { 0 }, { 0 }, { } };
+	void *loc_cpu_entry;

-	newinfo = vmalloc(sizeof(struct ip6t_table_info)
-			  + SMP_ALIGN(repl->size) *
-				(highest_possible_processor_id()+1));
+	newinfo = alloc_table_info(repl->size);
 	if (!newinfo)
 		return -ENOMEM;

-	memcpy(newinfo->entries, repl->entries, repl->size);
+	/* choose the copy on our node/cpu */
+	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	memcpy(loc_cpu_entry, repl->entries, repl->size);

 	ret = translate_table(table->name, table->valid_hooks,
-			      newinfo, repl->size,
+			      newinfo, loc_cpu_entry, repl->size,
 			      repl->num_entries,
 			      repl->hook_entry,
 			      repl->underflow);
 	if (ret != 0) {
-		vfree(newinfo);
+		free_table_info(newinfo);
 		return ret;
 	}

 	ret = down_interruptible(&ip6t_mutex);
 	if (ret != 0) {
-		vfree(newinfo);
+		free_table_info(newinfo);
 		return ret;
 	}

@@ -1593,20 +1652,23 @@ int ip6t_register_table(struct ip6t_table *table,
 	return ret;

  free_unlock:
-	vfree(newinfo);
+	free_table_info(newinfo);
 	goto unlock;
 }

 void ip6t_unregister_table(struct ip6t_table *table)
 {
+	void *loc_cpu_entry;
+
 	down(&ip6t_mutex);
 	LIST_DELETE(&ip6t_tables, table);
 	up(&ip6t_mutex);

 	/* Decrease module usage counts and free resources */
-	IP6T_ENTRY_ITERATE(table->private->entries, table->private->size,
+	loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
+	IP6T_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
 			   cleanup_entry, NULL);
-	vfree(table->private);
+	free_table_info(table->private);
 }

 /* Returns 1 if the port is matched by the range, 0 otherwise */