author		Dmitry Mishin <dim@openvz.org>	2006-04-01 05:25:19 -0500
committer	David S. Miller <davem@davemloft.net>	2006-04-01 05:25:19 -0500
commit		2722971cbe831117686039d5c334f2c0f560be13 (patch)
tree		b810ea96778e4f5de2a7713685c0551aa34c8f97 /net
parent		e64a70be5175ac2c209fa742123a6ce845852e0e (diff)
[NETFILTER]: iptables 32bit compat layer
This patch extends the current iptables compatibility layer in order to get 32-bit iptables to work on a 64-bit kernel. The current layer is insufficient due to alignment checks both in the kernel and in the userspace tools.

The patch is for current net-2.6.17, with the addition of the move of the ipt_entry_{match|target} definitions to xt_entry_{match|target}.

Signed-off-by: Dmitry Mishin <dim@openvz.org>
Acked-off-by: Kirill Korotaev <dev@openvz.org>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/compat.c			|    3
-rw-r--r--	net/ipv4/netfilter/ip_tables.c	| 1138
-rw-r--r--	net/netfilter/x_tables.c	|  113
3 files changed, 1121 insertions(+), 133 deletions(-)
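The alignment problem the message describes is easiest to see in the per-rule match/target records: userspace rounds each record up with XT_ALIGN(), which pads to 8 bytes when iptables is built for a 64-bit kernel but only to 4 bytes in a 32-bit build, so every size and jump offset in the rule blob shifts. Below is a minimal standalone sketch of that size delta; ALIGN_TO and the 36-byte record are illustrative stand-ins for XT_ALIGN/COMPAT_XT_ALIGN and a real match record, not code from the patch.

/* Standalone illustration, not kernel code: how 8- vs 4-byte rounding of
 * match/target records makes a 32-bit iptables blob disagree with the
 * layout a 64-bit kernel expects.  ALIGN_TO stands in for XT_ALIGN and
 * COMPAT_XT_ALIGN; the 36-byte record size is made up for the example. */
#include <stdio.h>

#define ALIGN_TO(len, align)	(((len) + (align) - 1) & ~((align) - 1))

int main(void)
{
	unsigned int raw = 36;			/* header + private data of one record */
	unsigned int size64 = ALIGN_TO(raw, 8);	/* layout the 64-bit kernel uses */
	unsigned int size32 = ALIGN_TO(raw, 4);	/* layout a 32-bit iptables sends */

	printf("64-bit: %u bytes, 32-bit: %u bytes, delta: %u\n",
	       size64, size32, size64 - size32);
	/* Prints "64-bit: 40 bytes, 32-bit: 36 bytes, delta: 4".  The patch
	 * records one such delta per rule (compat_add_offset) and applies the
	 * accumulated sum to verdicts and offsets (compat_calc_jump) while
	 * converting the table, instead of rejecting the mismatched blob. */
	return 0;
}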
diff --git a/net/compat.c b/net/compat.c
index 8fd37cd7b501..d5d69fa15d07 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -476,8 +476,7 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
 	int err;
 	struct socket *sock;
 
-	/* SO_SET_REPLACE seems to be the same in all levels */
-	if (optname == IPT_SO_SET_REPLACE)
+	if (level == SOL_IPV6 && optname == IPT_SO_SET_REPLACE)
 		return do_netfilter_replace(fd, level, optname,
 					    optval, optlen);
 
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 460fd905fad0..d5b8cdd361ce 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/icmp.h>
 #include <net/ip.h>
+#include <net/compat.h>
 #include <asm/uaccess.h>
 #include <linux/mutex.h>
 #include <linux/proc_fs.h>
@@ -799,17 +800,11 @@ get_counters(const struct xt_table_info *t,
 	}
 }
 
-static int
-copy_entries_to_user(unsigned int total_size,
-		     struct ipt_table *table,
-		     void __user *userptr)
+static inline struct xt_counters * alloc_counters(struct ipt_table *table)
 {
-	unsigned int off, num, countersize;
-	struct ipt_entry *e;
+	unsigned int countersize;
 	struct xt_counters *counters;
 	struct xt_table_info *private = table->private;
-	int ret = 0;
-	void *loc_cpu_entry;
 
 	/* We need atomic snapshot of counters: rest doesn't change
 	   (other than comefrom, which userspace doesn't care
@@ -818,13 +813,32 @@ copy_entries_to_user(unsigned int total_size,
 	counters = vmalloc_node(countersize, numa_node_id());
 
 	if (counters == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	/* First, sum counters... */
 	write_lock_bh(&table->lock);
 	get_counters(private, counters);
 	write_unlock_bh(&table->lock);
 
+	return counters;
+}
+
+static int
+copy_entries_to_user(unsigned int total_size,
+		     struct ipt_table *table,
+		     void __user *userptr)
+{
+	unsigned int off, num;
+	struct ipt_entry *e;
+	struct xt_counters *counters;
+	struct xt_table_info *private = table->private;
+	int ret = 0;
+	void *loc_cpu_entry;
+
+	counters = alloc_counters(table);
+	if (IS_ERR(counters))
+		return PTR_ERR(counters);
+
 	/* choose the copy that is on our node/cpu, ...
 	 * This choice is lazy (because current thread is
 	 * allowed to migrate to another cpu)
@@ -884,25 +898,278 @@ copy_entries_to_user(unsigned int total_size,
 	return ret;
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_delta {
+	struct compat_delta *next;
+	u_int16_t offset;
+	short delta;
+};
+
+static struct compat_delta *compat_offsets = NULL;
+
+static int compat_add_offset(u_int16_t offset, short delta)
+{
+	struct compat_delta *tmp;
+
+	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+	tmp->offset = offset;
+	tmp->delta = delta;
+	if (compat_offsets) {
+		tmp->next = compat_offsets->next;
+		compat_offsets->next = tmp;
+	} else {
+		compat_offsets = tmp;
+		tmp->next = NULL;
+	}
+	return 0;
+}
+
+static void compat_flush_offsets(void)
+{
+	struct compat_delta *tmp, *next;
+
+	if (compat_offsets) {
+		for (tmp = compat_offsets; tmp; tmp = next) {
+			next = tmp->next;
+			kfree(tmp);
+		}
+		compat_offsets = NULL;
+	}
+}
+
+static short compat_calc_jump(u_int16_t offset)
+{
+	struct compat_delta *tmp;
+	short delta;
+
+	for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
+		if (tmp->offset < offset)
+			delta += tmp->delta;
+	return delta;
+}
+
+struct compat_ipt_standard_target
+{
+	struct compat_xt_entry_target target;
+	compat_int_t verdict;
+};
+
+#define IPT_ST_OFFSET	(sizeof(struct ipt_standard_target) - \
+				sizeof(struct compat_ipt_standard_target))
+
+struct compat_ipt_standard
+{
+	struct compat_ipt_entry entry;
+	struct compat_ipt_standard_target target;
+};
+
+static int compat_ipt_standard_fn(void *target,
+		void **dstptr, int *size, int convert)
+{
+	struct compat_ipt_standard_target compat_st, *pcompat_st;
+	struct ipt_standard_target st, *pst;
+	int ret;
+
+	ret = 0;
+	switch (convert) {
+	case COMPAT_TO_USER:
+		pst = (struct ipt_standard_target *)target;
+		memcpy(&compat_st.target, &pst->target,
+			sizeof(struct ipt_entry_target));
+		compat_st.verdict = pst->verdict;
+		if (compat_st.verdict > 0)
+			compat_st.verdict -=
+				compat_calc_jump(compat_st.verdict);
+		compat_st.target.u.user.target_size =
+			sizeof(struct compat_ipt_standard_target);
+		if (__copy_to_user(*dstptr, &compat_st,
+				sizeof(struct compat_ipt_standard_target)))
+			ret = -EFAULT;
+		*size -= IPT_ST_OFFSET;
+		*dstptr += sizeof(struct compat_ipt_standard_target);
+		break;
+	case COMPAT_FROM_USER:
+		pcompat_st =
+			(struct compat_ipt_standard_target *)target;
+		memcpy(&st.target, &pcompat_st->target,
+			sizeof(struct ipt_entry_target));
+		st.verdict = pcompat_st->verdict;
+		if (st.verdict > 0)
+			st.verdict += compat_calc_jump(st.verdict);
+		st.target.u.user.target_size =
+			sizeof(struct ipt_standard_target);
+		memcpy(*dstptr, &st,
+			sizeof(struct ipt_standard_target));
+		*size += IPT_ST_OFFSET;
+		*dstptr += sizeof(struct ipt_standard_target);
+		break;
+	case COMPAT_CALC_SIZE:
+		*size += IPT_ST_OFFSET;
+		break;
+	default:
+		ret = -ENOPROTOOPT;
+		break;
+	}
+	return ret;
+}
+
+static inline int
+compat_calc_match(struct ipt_entry_match *m, int *size)
+{
+	if (m->u.kernel.match->compat)
+		m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
+	else
+		xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
+	return 0;
+}
+
+static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
+		void *base, struct xt_table_info *newinfo)
+{
+	struct ipt_entry_target *t;
+	u_int16_t entry_offset;
+	int off, i, ret;
+
+	off = 0;
+	entry_offset = (void *)e - base;
+	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
+	t = ipt_get_target(e);
+	if (t->u.kernel.target->compat)
+		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
+	else
+		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
+	newinfo->size -= off;
+	ret = compat_add_offset(entry_offset, off);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+		if (info->hook_entry[i] && (e < (struct ipt_entry *)
+				(base + info->hook_entry[i])))
+			newinfo->hook_entry[i] -= off;
+		if (info->underflow[i] && (e < (struct ipt_entry *)
+				(base + info->underflow[i])))
+			newinfo->underflow[i] -= off;
+	}
+	return 0;
+}
+
+static int compat_table_info(struct xt_table_info *info,
+		struct xt_table_info *newinfo)
+{
+	void *loc_cpu_entry;
+	int i;
+
+	if (!newinfo || !info)
+		return -EINVAL;
+
+	memset(newinfo, 0, sizeof(struct xt_table_info));
+	newinfo->size = info->size;
+	newinfo->number = info->number;
+	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+		newinfo->hook_entry[i] = info->hook_entry[i];
+		newinfo->underflow[i] = info->underflow[i];
+	}
+	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
+			compat_calc_entry, info, loc_cpu_entry, newinfo);
+}
+#endif
+
+static int get_info(void __user *user, int *len, int compat)
+{
+	char name[IPT_TABLE_MAXNAMELEN];
+	struct ipt_table *t;
+	int ret;
+
+	if (*len != sizeof(struct ipt_getinfo)) {
+		duprintf("length %u != %u\n", *len,
+			(unsigned int)sizeof(struct ipt_getinfo));
+		return -EINVAL;
+	}
+
+	if (copy_from_user(name, user, sizeof(name)) != 0)
+		return -EFAULT;
+
+	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
+#ifdef CONFIG_COMPAT
+	if (compat)
+		xt_compat_lock(AF_INET);
+#endif
+	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
+			"iptable_%s", name);
+	if (t && !IS_ERR(t)) {
+		struct ipt_getinfo info;
+		struct xt_table_info *private = t->private;
+
+#ifdef CONFIG_COMPAT
+		if (compat) {
+			struct xt_table_info tmp;
+			ret = compat_table_info(private, &tmp);
+			compat_flush_offsets();
+			private = &tmp;
+		}
+#endif
+		info.valid_hooks = t->valid_hooks;
+		memcpy(info.hook_entry, private->hook_entry,
+				sizeof(info.hook_entry));
+		memcpy(info.underflow, private->underflow,
+				sizeof(info.underflow));
+		info.num_entries = private->number;
+		info.size = private->size;
+		strcpy(info.name, name);
+
+		if (copy_to_user(user, &info, *len) != 0)
+			ret = -EFAULT;
+		else
+			ret = 0;
+
+		xt_table_unlock(t);
+		module_put(t->me);
+	} else
+		ret = t ? PTR_ERR(t) : -ENOENT;
+#ifdef CONFIG_COMPAT
+	if (compat)
+		xt_compat_unlock(AF_INET);
+#endif
+	return ret;
+}
+
 static int
-get_entries(const struct ipt_get_entries *entries,
-	    struct ipt_get_entries __user *uptr)
+get_entries(struct ipt_get_entries __user *uptr, int *len)
 {
 	int ret;
+	struct ipt_get_entries get;
 	struct ipt_table *t;
 
-	t = xt_find_table_lock(AF_INET, entries->name);
+	if (*len < sizeof(get)) {
+		duprintf("get_entries: %u < %d\n", *len,
+				(unsigned int)sizeof(get));
+		return -EINVAL;
+	}
+	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
+		return -EFAULT;
+	if (*len != sizeof(struct ipt_get_entries) + get.size) {
+		duprintf("get_entries: %u != %u\n", *len,
+				(unsigned int)(sizeof(struct ipt_get_entries) +
+				get.size));
+		return -EINVAL;
+	}
+
+	t = xt_find_table_lock(AF_INET, get.name);
 	if (t && !IS_ERR(t)) {
 		struct xt_table_info *private = t->private;
 		duprintf("t->private->number = %u\n",
 			 private->number);
-		if (entries->size == private->size)
+		if (get.size == private->size)
 			ret = copy_entries_to_user(private->size,
 						   t, uptr->entrytable);
 		else {
 			duprintf("get_entries: I've got %u not %u!\n",
 				 private->size,
-				 entries->size);
+				 get.size);
 			ret = -EINVAL;
 		}
 		module_put(t->me);
@@ -914,79 +1181,47 @@ get_entries(const struct ipt_get_entries *entries,
 }
 
 static int
-do_replace(void __user *user, unsigned int len)
+__do_replace(const char *name, unsigned int valid_hooks,
+		struct xt_table_info *newinfo, unsigned int num_counters,
+		void __user *counters_ptr)
 {
 	int ret;
-	struct ipt_replace tmp;
 	struct ipt_table *t;
-	struct xt_table_info *newinfo, *oldinfo;
+	struct xt_table_info *oldinfo;
 	struct xt_counters *counters;
-	void *loc_cpu_entry, *loc_cpu_old_entry;
+	void *loc_cpu_old_entry;
 
-	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
-		return -EFAULT;
-
-	/* Hack: Causes ipchains to give correct error msg --RR */
-	if (len != sizeof(tmp) + tmp.size)
-		return -ENOPROTOOPT;
-
-	/* overflow check */
-	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
-			SMP_CACHE_BYTES)
-		return -ENOMEM;
-	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
-		return -ENOMEM;
-
-	newinfo = xt_alloc_table_info(tmp.size);
-	if (!newinfo)
-		return -ENOMEM;
-
-	/* choose the copy that is our node/cpu */
-	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
-	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
-			   tmp.size) != 0) {
-		ret = -EFAULT;
-		goto free_newinfo;
-	}
-
-	counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
+	ret = 0;
+	counters = vmalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
-		goto free_newinfo;
+		goto out;
 	}
 
-	ret = translate_table(tmp.name, tmp.valid_hooks,
-			newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
-			tmp.hook_entry, tmp.underflow);
-	if (ret != 0)
-		goto free_newinfo_counters;
-
-	duprintf("ip_tables: Translated table\n");
-
-	t = try_then_request_module(xt_find_table_lock(AF_INET, tmp.name),
-				    "iptable_%s", tmp.name);
+	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
+				    "iptable_%s", name);
 	if (!t || IS_ERR(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free_newinfo_counters_untrans;
 	}
 
 	/* You lied! */
-	if (tmp.valid_hooks != t->valid_hooks) {
+	if (valid_hooks != t->valid_hooks) {
 		duprintf("Valid hook crap: %08X vs %08X\n",
-			 tmp.valid_hooks, t->valid_hooks);
+			 valid_hooks, t->valid_hooks);
 		ret = -EINVAL;
 		goto put_module;
 	}
 
-	oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
+	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
 	if (!oldinfo)
 		goto put_module;
 
 	/* Update module usage count based on number of rules */
 	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
 		oldinfo->number, oldinfo->initial_entries, newinfo->number);
 	if ((oldinfo->number > oldinfo->initial_entries) ||
 	    (newinfo->number <= oldinfo->initial_entries))
 		module_put(t->me);
 	if ((oldinfo->number > oldinfo->initial_entries) &&
 	    (newinfo->number <= oldinfo->initial_entries))
@@ -998,8 +1233,8 @@ do_replace(void __user *user, unsigned int len)
 	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
 	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
 	xt_free_table_info(oldinfo);
-	if (copy_to_user(tmp.counters, counters,
-		sizeof(struct xt_counters) * tmp.num_counters) != 0)
+	if (copy_to_user(counters_ptr, counters,
+		sizeof(struct xt_counters) * num_counters) != 0)
 		ret = -EFAULT;
 	vfree(counters);
 	xt_table_unlock(t);
@@ -1009,9 +1244,62 @@ do_replace(void __user *user, unsigned int len)
 	module_put(t->me);
 	xt_table_unlock(t);
 free_newinfo_counters_untrans:
-	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
- free_newinfo_counters:
 	vfree(counters);
+ out:
+	return ret;
+}
+
+static int
+do_replace(void __user *user, unsigned int len)
+{
+	int ret;
+	struct ipt_replace tmp;
+	struct xt_table_info *newinfo;
+	void *loc_cpu_entry;
+
+	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+		return -EFAULT;
+
+	/* Hack: Causes ipchains to give correct error msg --RR */
+	if (len != sizeof(tmp) + tmp.size)
+		return -ENOPROTOOPT;
+
+	/* overflow check */
+	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+			SMP_CACHE_BYTES)
+		return -ENOMEM;
+	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+		return -ENOMEM;
+
+	newinfo = xt_alloc_table_info(tmp.size);
+	if (!newinfo)
+		return -ENOMEM;
+
+	/* choose the copy that is our node/cpu */
+	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
+			tmp.size) != 0) {
+		ret = -EFAULT;
+		goto free_newinfo;
+	}
+
+	ret = translate_table(tmp.name, tmp.valid_hooks,
+			newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
+			tmp.hook_entry, tmp.underflow);
+	if (ret != 0)
+		goto free_newinfo;
+
+	duprintf("ip_tables: Translated table\n");
+
+	ret = __do_replace(tmp.name, tmp.valid_hooks,
+			newinfo, tmp.num_counters,
+			tmp.counters);
+	if (ret)
+		goto free_newinfo_untrans;
+	return 0;
+
+ free_newinfo_untrans:
+	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
 free_newinfo:
 	xt_free_table_info(newinfo);
 	return ret;
@@ -1040,31 +1328,59 @@ add_counter_to_entry(struct ipt_entry *e,
 }
 
 static int
-do_add_counters(void __user *user, unsigned int len)
+do_add_counters(void __user *user, unsigned int len, int compat)
 {
 	unsigned int i;
-	struct xt_counters_info tmp, *paddc;
+	struct xt_counters_info tmp;
+	struct xt_counters *paddc;
+	unsigned int num_counters;
+	char *name;
+	int size;
+	void *ptmp;
 	struct ipt_table *t;
 	struct xt_table_info *private;
 	int ret = 0;
 	void *loc_cpu_entry;
+#ifdef CONFIG_COMPAT
+	struct compat_xt_counters_info compat_tmp;
 
-	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+	if (compat) {
+		ptmp = &compat_tmp;
+		size = sizeof(struct compat_xt_counters_info);
+	} else
+#endif
+	{
+		ptmp = &tmp;
+		size = sizeof(struct xt_counters_info);
+	}
+
+	if (copy_from_user(ptmp, user, size) != 0)
 		return -EFAULT;
 
-	if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
+#ifdef CONFIG_COMPAT
+	if (compat) {
+		num_counters = compat_tmp.num_counters;
+		name = compat_tmp.name;
+	} else
+#endif
+	{
+		num_counters = tmp.num_counters;
+		name = tmp.name;
+	}
+
+	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = vmalloc_node(len, numa_node_id());
+	paddc = vmalloc_node(len - size, numa_node_id());
 	if (!paddc)
 		return -ENOMEM;
 
-	if (copy_from_user(paddc, user, len) != 0) {
+	if (copy_from_user(paddc, user + size, len - size) != 0) {
 		ret = -EFAULT;
 		goto free;
 	}
 
-	t = xt_find_table_lock(AF_INET, tmp.name);
+	t = xt_find_table_lock(AF_INET, name);
 	if (!t || IS_ERR(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
@@ -1072,7 +1388,7 @@ do_add_counters(void __user *user, unsigned int len)
 
 	write_lock_bh(&t->lock);
 	private = t->private;
-	if (private->number != paddc->num_counters) {
+	if (private->number != num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
@@ -1083,7 +1399,7 @@ do_add_counters(void __user *user, unsigned int len)
 	IPT_ENTRY_ITERATE(loc_cpu_entry,
 			  private->size,
 			  add_counter_to_entry,
-			  paddc->counters,
+			  paddc,
 			  &i);
 unlock_up_free:
 	write_unlock_bh(&t->lock);
@@ -1095,8 +1411,438 @@ do_add_counters(void __user *user, unsigned int len)
 	return ret;
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_ipt_replace {
+	char name[IPT_TABLE_MAXNAMELEN];
+	u32 valid_hooks;
+	u32 num_entries;
+	u32 size;
+	u32 hook_entry[NF_IP_NUMHOOKS];
+	u32 underflow[NF_IP_NUMHOOKS];
+	u32 num_counters;
+	compat_uptr_t counters;	/* struct ipt_counters * */
+	struct compat_ipt_entry entries[0];
+};
+
+static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
+		void __user **dstptr, compat_uint_t *size)
+{
+	if (m->u.kernel.match->compat)
+		return m->u.kernel.match->compat(m, dstptr, size,
+				COMPAT_TO_USER);
+	else
+		return xt_compat_match(m, dstptr, size, COMPAT_TO_USER);
+}
+
+static int compat_copy_entry_to_user(struct ipt_entry *e,
+		void __user **dstptr, compat_uint_t *size)
+{
+	struct ipt_entry_target __user *t;
+	struct compat_ipt_entry __user *ce;
+	u_int16_t target_offset, next_offset;
+	compat_uint_t origsize;
+	int ret;
+
+	ret = -EFAULT;
+	origsize = *size;
+	ce = (struct compat_ipt_entry __user *)*dstptr;
+	if (__copy_to_user(ce, e, sizeof(struct ipt_entry)))
+		goto out;
+
+	*dstptr += sizeof(struct compat_ipt_entry);
+	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
+	target_offset = e->target_offset - (origsize - *size);
+	if (ret)
+		goto out;
+	t = ipt_get_target(e);
+	if (t->u.kernel.target->compat)
+		ret = t->u.kernel.target->compat(t, dstptr, size,
+				COMPAT_TO_USER);
+	else
+		ret = xt_compat_target(t, dstptr, size, COMPAT_TO_USER);
+	if (ret)
+		goto out;
+	ret = -EFAULT;
+	next_offset = e->next_offset - (origsize - *size);
+	if (__put_user(target_offset, &ce->target_offset))
+		goto out;
+	if (__put_user(next_offset, &ce->next_offset))
+		goto out;
+	return 0;
+out:
+	return ret;
+}
+
+static inline int
+compat_check_calc_match(struct ipt_entry_match *m,
+		const char *name,
+		const struct ipt_ip *ip,
+		unsigned int hookmask,
+		int *size, int *i)
+{
+	struct ipt_match *match;
+
+	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
+						m->u.user.revision),
+					"ipt_%s", m->u.user.name);
+	if (IS_ERR(match) || !match) {
+		duprintf("compat_check_calc_match: `%s' not found\n",
+				m->u.user.name);
+		return match ? PTR_ERR(match) : -ENOENT;
+	}
+	m->u.kernel.match = match;
+
+	if (m->u.kernel.match->compat)
+		m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
+	else
+		xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
+
+	(*i)++;
+	return 0;
+}
+
+static inline int
+check_compat_entry_size_and_hooks(struct ipt_entry *e,
+			struct xt_table_info *newinfo,
+			unsigned int *size,
+			unsigned char *base,
+			unsigned char *limit,
+			unsigned int *hook_entries,
+			unsigned int *underflows,
+			unsigned int *i,
+			const char *name)
+{
+	struct ipt_entry_target *t;
+	struct ipt_target *target;
+	u_int16_t entry_offset;
+	int ret, off, h, j;
+
+	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
+	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
+		duprintf("Bad offset %p, limit = %p\n", e, limit);
+		return -EINVAL;
+	}
+
+	if (e->next_offset < sizeof(struct compat_ipt_entry) +
+			sizeof(struct compat_xt_entry_target)) {
+		duprintf("checking: element %p size %u\n",
+				e, e->next_offset);
+		return -EINVAL;
+	}
+
+	if (!ip_checkentry(&e->ip)) {
+		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+		return -EINVAL;
+	}
+
+	off = 0;
+	entry_offset = (void *)e - (void *)base;
+	j = 0;
+	ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
+			e->comefrom, &off, &j);
+	if (ret != 0)
+		goto out;
+
+	t = ipt_get_target(e);
+	target = try_then_request_module(xt_find_target(AF_INET,
+						t->u.user.name,
+						t->u.user.revision),
+					"ipt_%s", t->u.user.name);
+	if (IS_ERR(target) || !target) {
+		duprintf("check_entry: `%s' not found\n", t->u.user.name);
+		ret = target ? PTR_ERR(target) : -ENOENT;
+		goto out;
+	}
+	t->u.kernel.target = target;
+
+	if (t->u.kernel.target->compat)
+		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
+	else
+		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
+	*size += off;
+	ret = compat_add_offset(entry_offset, off);
+	if (ret)
+		goto out;
+
+	/* Check hooks & underflows */
+	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
+		if ((unsigned char *)e - base == hook_entries[h])
+			newinfo->hook_entry[h] = hook_entries[h];
+		if ((unsigned char *)e - base == underflows[h])
+			newinfo->underflow[h] = underflows[h];
+	}
+
+	/* Clear counters and comefrom */
+	e->counters = ((struct ipt_counters) { 0, 0 });
+	e->comefrom = 0;
+
+	(*i)++;
+	return 0;
+out:
+	IPT_MATCH_ITERATE(e, cleanup_match, &j);
+	return ret;
+}
+
+static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
+	void **dstptr, compat_uint_t *size, const char *name,
+	const struct ipt_ip *ip, unsigned int hookmask)
+{
+	struct ipt_entry_match *dm;
+	struct ipt_match *match;
+	int ret;
+
+	dm = (struct ipt_entry_match *)*dstptr;
+	match = m->u.kernel.match;
+	if (match->compat)
+		match->compat(m, dstptr, size, COMPAT_FROM_USER);
+	else
+		xt_compat_match(m, dstptr, size, COMPAT_FROM_USER);
+
+	ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
+			name, hookmask, ip->proto,
+			ip->invflags & IPT_INV_PROTO);
+	if (ret)
+		return ret;
+
+	if (m->u.kernel.match->checkentry
+	    && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
+					      dm->u.match_size - sizeof(*dm),
+					      hookmask)) {
+		duprintf("ip_tables: check failed for `%s'.\n",
+			 m->u.kernel.match->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
+	unsigned int *size, const char *name,
+	struct xt_table_info *newinfo, unsigned char *base)
+{
+	struct ipt_entry_target *t;
+	struct ipt_target *target;
+	struct ipt_entry *de;
+	unsigned int origsize;
+	int ret, h;
+
+	ret = 0;
+	origsize = *size;
+	de = (struct ipt_entry *)*dstptr;
+	memcpy(de, e, sizeof(struct ipt_entry));
+
+	*dstptr += sizeof(struct compat_ipt_entry);
+	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
+			name, &de->ip, de->comefrom);
+	if (ret)
+		goto out;
+	de->target_offset = e->target_offset - (origsize - *size);
+	t = ipt_get_target(e);
+	target = t->u.kernel.target;
+	if (target->compat)
+		target->compat(t, dstptr, size, COMPAT_FROM_USER);
+	else
+		xt_compat_target(t, dstptr, size, COMPAT_FROM_USER);
+
+	de->next_offset = e->next_offset - (origsize - *size);
+	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
+		if ((unsigned char *)de - base < newinfo->hook_entry[h])
+			newinfo->hook_entry[h] -= origsize - *size;
+		if ((unsigned char *)de - base < newinfo->underflow[h])
+			newinfo->underflow[h] -= origsize - *size;
+	}
+
+	t = ipt_get_target(de);
+	target = t->u.kernel.target;
+	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
+			name, e->comefrom, e->ip.proto,
+			e->ip.invflags & IPT_INV_PROTO);
+	if (ret)
+		goto out;
+
+	ret = -EINVAL;
+	if (t->u.kernel.target == &ipt_standard_target) {
+		if (!standard_check(t, *size))
+			goto out;
+	} else if (t->u.kernel.target->checkentry
+		   && !t->u.kernel.target->checkentry(name, de, target,
+			t->data, t->u.target_size - sizeof(*t),
+			de->comefrom)) {
+		duprintf("ip_tables: compat: check failed for `%s'.\n",
+			 t->u.kernel.target->name);
+		goto out;
+	}
+	ret = 0;
+out:
+	return ret;
+}
+
 static int
-do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+translate_compat_table(const char *name,
+		unsigned int valid_hooks,
+		struct xt_table_info **pinfo,
+		void **pentry0,
+		unsigned int total_size,
+		unsigned int number,
+		unsigned int *hook_entries,
+		unsigned int *underflows)
+{
+	unsigned int i;
+	struct xt_table_info *newinfo, *info;
+	void *pos, *entry0, *entry1;
+	unsigned int size;
+	int ret;
+
+	info = *pinfo;
+	entry0 = *pentry0;
+	size = total_size;
+	info->number = number;
+
+	/* Init all hooks to impossible value. */
+	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+		info->hook_entry[i] = 0xFFFFFFFF;
+		info->underflow[i] = 0xFFFFFFFF;
+	}
+
+	duprintf("translate_compat_table: size %u\n", info->size);
+	i = 0;
+	xt_compat_lock(AF_INET);
+	/* Walk through entries, checking offsets. */
+	ret = IPT_ENTRY_ITERATE(entry0, total_size,
+			check_compat_entry_size_and_hooks,
+			info, &size, entry0,
+			entry0 + total_size,
+			hook_entries, underflows, &i, name);
+	if (ret != 0)
+		goto out_unlock;
+
+	ret = -EINVAL;
+	if (i != number) {
+		duprintf("translate_compat_table: %u not %u entries\n",
+			 i, number);
+		goto out_unlock;
+	}
+
+	/* Check hooks all assigned */
+	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+		/* Only hooks which are valid */
+		if (!(valid_hooks & (1 << i)))
+			continue;
+		if (info->hook_entry[i] == 0xFFFFFFFF) {
+			duprintf("Invalid hook entry %u %u\n",
+				 i, hook_entries[i]);
+			goto out_unlock;
+		}
+		if (info->underflow[i] == 0xFFFFFFFF) {
+			duprintf("Invalid underflow %u %u\n",
+				 i, underflows[i]);
+			goto out_unlock;
+		}
+	}
+
+	ret = -ENOMEM;
+	newinfo = xt_alloc_table_info(size);
+	if (!newinfo)
+		goto out_unlock;
+
+	newinfo->number = number;
+	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
+		newinfo->hook_entry[i] = info->hook_entry[i];
+		newinfo->underflow[i] = info->underflow[i];
+	}
+	entry1 = newinfo->entries[raw_smp_processor_id()];
+	pos = entry1;
+	size = total_size;
+	ret = IPT_ENTRY_ITERATE(entry0, total_size,
+			compat_copy_entry_from_user, &pos, &size,
+			name, newinfo, entry1);
+	compat_flush_offsets();
+	xt_compat_unlock(AF_INET);
+	if (ret)
+		goto free_newinfo;
+
+	ret = -ELOOP;
+	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+		goto free_newinfo;
+
+	/* And one copy for every other CPU */
+	for_each_cpu(i)
+		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
+			memcpy(newinfo->entries[i], entry1, newinfo->size);
+
+	*pinfo = newinfo;
+	*pentry0 = entry1;
+	xt_free_table_info(info);
+	return 0;
+
+free_newinfo:
+	xt_free_table_info(newinfo);
+out:
+	return ret;
+out_unlock:
+	xt_compat_unlock(AF_INET);
+	goto out;
+}
+
+static int
+compat_do_replace(void __user *user, unsigned int len)
+{
+	int ret;
+	struct compat_ipt_replace tmp;
+	struct xt_table_info *newinfo;
+	void *loc_cpu_entry;
+
+	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+		return -EFAULT;
+
+	/* Hack: Causes ipchains to give correct error msg --RR */
+	if (len != sizeof(tmp) + tmp.size)
+		return -ENOPROTOOPT;
+
+	/* overflow check */
+	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+			SMP_CACHE_BYTES)
+		return -ENOMEM;
+	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+		return -ENOMEM;
+
+	newinfo = xt_alloc_table_info(tmp.size);
+	if (!newinfo)
+		return -ENOMEM;
+
+	/* choose the copy that is our node/cpu */
+	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
+	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
+			tmp.size) != 0) {
+		ret = -EFAULT;
+		goto free_newinfo;
+	}
+
+	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
+			&newinfo, &loc_cpu_entry, tmp.size,
+			tmp.num_entries, tmp.hook_entry, tmp.underflow);
+	if (ret != 0)
+		goto free_newinfo;
+
+	duprintf("compat_do_replace: Translated table\n");
+
+	ret = __do_replace(tmp.name, tmp.valid_hooks,
+			newinfo, tmp.num_counters,
+			compat_ptr(tmp.counters));
+	if (ret)
+		goto free_newinfo_untrans;
+	return 0;
+
+ free_newinfo_untrans:
+	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
+ free_newinfo:
+	xt_free_table_info(newinfo);
+	return ret;
+}
+
+static int
+compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
+		unsigned int len)
 {
 	int ret;
 
@@ -1105,11 +1851,11 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
 	switch (cmd) {
 	case IPT_SO_SET_REPLACE:
-		ret = do_replace(user, len);
+		ret = compat_do_replace(user, len);
 		break;
 
 	case IPT_SO_SET_ADD_COUNTERS:
-		ret = do_add_counters(user, len);
+		ret = do_add_counters(user, len, 1);
 		break;
 
 	default:
@@ -1120,75 +1866,196 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 	return ret;
 }
 
+struct compat_ipt_get_entries
+{
+	char name[IPT_TABLE_MAXNAMELEN];
+	compat_uint_t size;
+	struct compat_ipt_entry entrytable[0];
+};
+
+static int compat_copy_entries_to_user(unsigned int total_size,
+		struct ipt_table *table, void __user *userptr)
+{
+	unsigned int off, num;
+	struct compat_ipt_entry e;
+	struct xt_counters *counters;
+	struct xt_table_info *private = table->private;
+	void __user *pos;
+	unsigned int size;
+	int ret = 0;
+	void *loc_cpu_entry;
+
+	counters = alloc_counters(table);
+	if (IS_ERR(counters))
+		return PTR_ERR(counters);
+
+	/* choose the copy that is on our node/cpu, ...
+	 * This choice is lazy (because current thread is
+	 * allowed to migrate to another cpu)
+	 */
+	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	pos = userptr;
+	size = total_size;
+	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
+			compat_copy_entry_to_user, &pos, &size);
+	if (ret)
+		goto free_counters;
+
+	/* ... then go back and fix counters and names */
+	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
+		unsigned int i;
+		struct ipt_entry_match m;
+		struct ipt_entry_target t;
+
+		ret = -EFAULT;
+		if (copy_from_user(&e, userptr + off,
+				sizeof(struct compat_ipt_entry)))
+			goto free_counters;
+		if (copy_to_user(userptr + off +
+				offsetof(struct compat_ipt_entry, counters),
+				&counters[num], sizeof(counters[num])))
+			goto free_counters;
+
+		for (i = sizeof(struct compat_ipt_entry);
+				i < e.target_offset; i += m.u.match_size) {
+			if (copy_from_user(&m, userptr + off + i,
+					sizeof(struct ipt_entry_match)))
+				goto free_counters;
+			if (copy_to_user(userptr + off + i +
+					offsetof(struct ipt_entry_match, u.user.name),
+					m.u.kernel.match->name,
+					strlen(m.u.kernel.match->name) + 1))
+				goto free_counters;
+		}
+
+		if (copy_from_user(&t, userptr + off + e.target_offset,
+				sizeof(struct ipt_entry_target)))
+			goto free_counters;
+		if (copy_to_user(userptr + off + e.target_offset +
+				offsetof(struct ipt_entry_target, u.user.name),
+				t.u.kernel.target->name,
+				strlen(t.u.kernel.target->name) + 1))
+			goto free_counters;
+	}
+	ret = 0;
+free_counters:
+	vfree(counters);
+	return ret;
+}
+
 static int
-do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
 {
 	int ret;
+	struct compat_ipt_get_entries get;
+	struct ipt_table *t;
 
-	if (!capable(CAP_NET_ADMIN))
-		return -EPERM;
 
-	switch (cmd) {
-	case IPT_SO_GET_INFO: {
-		char name[IPT_TABLE_MAXNAMELEN];
-		struct ipt_table *t;
+	if (*len < sizeof(get)) {
+		duprintf("compat_get_entries: %u < %u\n",
+				*len, (unsigned int)sizeof(get));
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
+		return -EFAULT;
+
+	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
+		duprintf("compat_get_entries: %u != %u\n", *len,
+				(unsigned int)(sizeof(struct compat_ipt_get_entries) +
+				get.size));
+		return -EINVAL;
+	}
 
-		if (*len != sizeof(struct ipt_getinfo)) {
-			duprintf("length %u != %u\n", *len,
-				 sizeof(struct ipt_getinfo));
+	xt_compat_lock(AF_INET);
+	t = xt_find_table_lock(AF_INET, get.name);
+	if (t && !IS_ERR(t)) {
+		struct xt_table_info *private = t->private;
+		struct xt_table_info info;
+		duprintf("t->private->number = %u\n",
+			 private->number);
+		ret = compat_table_info(private, &info);
+		if (!ret && get.size == info.size) {
+			ret = compat_copy_entries_to_user(private->size,
+					t, uptr->entrytable);
+		} else if (!ret) {
+			duprintf("compat_get_entries: I've got %u not %u!\n",
+					private->size,
+					get.size);
 			ret = -EINVAL;
-			break;
 		}
+		compat_flush_offsets();
+		module_put(t->me);
+		xt_table_unlock(t);
+	} else
+		ret = t ? PTR_ERR(t) : -ENOENT;
 
-		if (copy_from_user(name, user, sizeof(name)) != 0) {
-			ret = -EFAULT;
-			break;
-		}
-		name[IPT_TABLE_MAXNAMELEN-1] = '\0';
-
-		t = try_then_request_module(xt_find_table_lock(AF_INET, name),
-					    "iptable_%s", name);
-		if (t && !IS_ERR(t)) {
-			struct ipt_getinfo info;
-			struct xt_table_info *private = t->private;
-
-			info.valid_hooks = t->valid_hooks;
-			memcpy(info.hook_entry, private->hook_entry,
-			       sizeof(info.hook_entry));
-			memcpy(info.underflow, private->underflow,
-			       sizeof(info.underflow));
-			info.num_entries = private->number;
-			info.size = private->size;
-			memcpy(info.name, name, sizeof(info.name));
-
-			if (copy_to_user(user, &info, *len) != 0)
-				ret = -EFAULT;
-			else
-				ret = 0;
-			xt_table_unlock(t);
-			module_put(t->me);
-		} else
-			ret = t ? PTR_ERR(t) : -ENOENT;
+	xt_compat_unlock(AF_INET);
+	return ret;
+}
+
+static int
+compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+{
+	int ret;
+
+	switch (cmd) {
+	case IPT_SO_GET_INFO:
+		ret = get_info(user, len, 1);
+		break;
+	case IPT_SO_GET_ENTRIES:
+		ret = compat_get_entries(user, len);
+		break;
+	default:
+		duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd);
+		ret = -EINVAL;
 	}
-	break;
+	return ret;
+}
+#endif
 
-	case IPT_SO_GET_ENTRIES: {
-		struct ipt_get_entries get;
+static int
+do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+{
+	int ret;
 
-		if (*len < sizeof(get)) {
-			duprintf("get_entries: %u < %u\n", *len, sizeof(get));
-			ret = -EINVAL;
-		} else if (copy_from_user(&get, user, sizeof(get)) != 0) {
-			ret = -EFAULT;
-		} else if (*len != sizeof(struct ipt_get_entries) + get.size) {
-			duprintf("get_entries: %u != %u\n", *len,
-				 sizeof(struct ipt_get_entries) + get.size);
-			ret = -EINVAL;
-		} else
-			ret = get_entries(&get, user);
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	switch (cmd) {
+	case IPT_SO_SET_REPLACE:
+		ret = do_replace(user, len);
 		break;
+
+	case IPT_SO_SET_ADD_COUNTERS:
+		ret = do_add_counters(user, len, 0);
+		break;
+
+	default:
+		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
+		ret = -EINVAL;
 	}
 
+	return ret;
+}
+
+static int
+do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+{
+	int ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	switch (cmd) {
+	case IPT_SO_GET_INFO:
+		ret = get_info(user, len, 0);
+		break;
+
+	case IPT_SO_GET_ENTRIES:
+		ret = get_entries(user, len);
+		break;
+
 	case IPT_SO_GET_REVISION_MATCH:
 	case IPT_SO_GET_REVISION_TARGET: {
 		struct ipt_get_revision rev;
@@ -1336,6 +2203,9 @@ static struct ipt_target ipt_standard_target = {
 	.name		= IPT_STANDARD_TARGET,
 	.targetsize	= sizeof(int),
 	.family		= AF_INET,
+#ifdef CONFIG_COMPAT
+	.compat		= &compat_ipt_standard_fn,
+#endif
 };
 
 static struct ipt_target ipt_error_target = {
@@ -1350,9 +2220,15 @@ static struct nf_sockopt_ops ipt_sockopts = {
 	.set_optmin	= IPT_BASE_CTL,
 	.set_optmax	= IPT_SO_SET_MAX+1,
 	.set		= do_ipt_set_ctl,
+#ifdef CONFIG_COMPAT
+	.compat_set	= compat_do_ipt_set_ctl,
+#endif
 	.get_optmin	= IPT_BASE_CTL,
 	.get_optmax	= IPT_SO_GET_MAX+1,
 	.get		= do_ipt_get_ctl,
+#ifdef CONFIG_COMPAT
+	.compat_get	= compat_do_ipt_get_ctl,
+#endif
 };
 
 static struct ipt_match icmp_matchstruct = {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a657ab5394c3..feb8a9e066b0 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -38,6 +38,7 @@ struct xt_af {
 	struct list_head match;
 	struct list_head target;
 	struct list_head tables;
+	struct mutex compat_mutex;
 };
 
 static struct xt_af *xt;
@@ -272,6 +273,54 @@ int xt_check_match(const struct xt_match *match, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(xt_check_match);
 
+#ifdef CONFIG_COMPAT
+int xt_compat_match(void *match, void **dstptr, int *size, int convert)
+{
+	struct xt_match *m;
+	struct compat_xt_entry_match *pcompat_m;
+	struct xt_entry_match *pm;
+	u_int16_t msize;
+	int off, ret;
+
+	ret = 0;
+	m = ((struct xt_entry_match *)match)->u.kernel.match;
+	off = XT_ALIGN(m->matchsize) - COMPAT_XT_ALIGN(m->matchsize);
+	switch (convert) {
+	case COMPAT_TO_USER:
+		pm = (struct xt_entry_match *)match;
+		msize = pm->u.user.match_size;
+		if (__copy_to_user(*dstptr, pm, msize)) {
+			ret = -EFAULT;
+			break;
+		}
+		msize -= off;
+		if (put_user(msize, (u_int16_t *)*dstptr))
+			ret = -EFAULT;
+		*size -= off;
+		*dstptr += msize;
+		break;
+	case COMPAT_FROM_USER:
+		pcompat_m = (struct compat_xt_entry_match *)match;
+		pm = (struct xt_entry_match *)*dstptr;
+		msize = pcompat_m->u.user.match_size;
+		memcpy(pm, pcompat_m, msize);
+		msize += off;
+		pm->u.user.match_size = msize;
+		*size += off;
+		*dstptr += msize;
+		break;
+	case COMPAT_CALC_SIZE:
+		*size += off;
+		break;
+	default:
+		ret = -ENOPROTOOPT;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xt_compat_match);
+#endif
+
 int xt_check_target(const struct xt_target *target, unsigned short family,
 		    unsigned int size, const char *table, unsigned int hook_mask,
 		    unsigned short proto, int inv_proto)
@@ -301,6 +350,54 @@ int xt_check_target(const struct xt_target *target, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(xt_check_target);
 
+#ifdef CONFIG_COMPAT
+int xt_compat_target(void *target, void **dstptr, int *size, int convert)
+{
+	struct xt_target *t;
+	struct compat_xt_entry_target *pcompat;
+	struct xt_entry_target *pt;
+	u_int16_t tsize;
+	int off, ret;
+
+	ret = 0;
+	t = ((struct xt_entry_target *)target)->u.kernel.target;
+	off = XT_ALIGN(t->targetsize) - COMPAT_XT_ALIGN(t->targetsize);
+	switch (convert) {
+	case COMPAT_TO_USER:
+		pt = (struct xt_entry_target *)target;
+		tsize = pt->u.user.target_size;
+		if (__copy_to_user(*dstptr, pt, tsize)) {
+			ret = -EFAULT;
+			break;
+		}
+		tsize -= off;
+		if (put_user(tsize, (u_int16_t *)*dstptr))
+			ret = -EFAULT;
+		*size -= off;
+		*dstptr += tsize;
+		break;
+	case COMPAT_FROM_USER:
+		pcompat = (struct compat_xt_entry_target *)target;
+		pt = (struct xt_entry_target *)*dstptr;
+		tsize = pcompat->u.user.target_size;
+		memcpy(pt, pcompat, tsize);
+		tsize += off;
+		pt->u.user.target_size = tsize;
+		*size += off;
+		*dstptr += tsize;
+		break;
+	case COMPAT_CALC_SIZE:
+		*size += off;
+		break;
+	default:
+		ret = -ENOPROTOOPT;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xt_compat_target);
+#endif
+
 struct xt_table_info *xt_alloc_table_info(unsigned int size)
 {
 	struct xt_table_info *newinfo;
@@ -371,6 +468,19 @@ void xt_table_unlock(struct xt_table *table)
 }
 EXPORT_SYMBOL_GPL(xt_table_unlock);
 
+#ifdef CONFIG_COMPAT
+void xt_compat_lock(int af)
+{
+	mutex_lock(&xt[af].compat_mutex);
+}
+EXPORT_SYMBOL_GPL(xt_compat_lock);
+
+void xt_compat_unlock(int af)
+{
+	mutex_unlock(&xt[af].compat_mutex);
+}
+EXPORT_SYMBOL_GPL(xt_compat_unlock);
+#endif
 
 struct xt_table_info *
 xt_replace_table(struct xt_table *table,
@@ -671,6 +781,9 @@ static int __init xt_init(void)
 
 	for (i = 0; i < NPROTO; i++) {
 		mutex_init(&xt[i].mutex);
+#ifdef CONFIG_COMPAT
+		mutex_init(&xt[i].compat_mutex);
+#endif
 		INIT_LIST_HEAD(&xt[i].target);
 		INIT_LIST_HEAD(&xt[i].match);
 		INIT_LIST_HEAD(&xt[i].tables);
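All of the conversion helpers added above (xt_compat_match(), xt_compat_target(), compat_ipt_standard_fn() and the per-extension ->compat hooks) share one calling convention: a single function handles COMPAT_TO_USER, COMPAT_FROM_USER and COMPAT_CALC_SIZE, adjusting *size and the destination pointer by the padding difference between the two layouts. A condensed standalone sketch of that convention follows; 'struct record' and the sizes are simplified stand-ins, not the kernel's xt_entry_match/xt_entry_target.

/* Standalone sketch of the convert-direction convention used by the new
 * ->compat hooks.  'struct record' and the sizes are illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { COMPAT_TO_USER = 1, COMPAT_FROM_USER, COMPAT_CALC_SIZE };

struct record {
	uint16_t size;		/* record size as stored in the blob */
	unsigned char data[62];
};

/* off = XT_ALIGN(payload) - COMPAT_XT_ALIGN(payload) in the real code */
static int compat_record(struct record *r, void **dstptr, int *size,
			 int off, int dir)
{
	struct record *dst = *dstptr;

	switch (dir) {
	case COMPAT_TO_USER:		/* kernel layout -> packed 32-bit layout */
		memcpy(dst, r, r->size - off);
		dst->size = r->size - off;
		*size -= off;
		*dstptr = (char *)*dstptr + dst->size;
		return 0;
	case COMPAT_FROM_USER:		/* 32-bit layout -> padded kernel layout */
		memcpy(dst, r, r->size);
		dst->size = r->size + off;
		*size += off;
		*dstptr = (char *)*dstptr + dst->size;
		return 0;
	case COMPAT_CALC_SIZE:		/* only account for the growth */
		*size += off;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct record in = { .size = 36 }, out = { 0 };
	void *dst = &out;
	int size = 0;

	compat_record(&in, &dst, &size, 4, COMPAT_FROM_USER);
	printf("kernel-side size %u, table grew by %d bytes\n",
	       (unsigned)out.size, size);
	return 0;
}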