aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorDaniel Borkmann <dborkman@redhat.com>2014-05-29 04:22:50 -0400
committerDavid S. Miller <davem@davemloft.net>2014-06-02 01:16:58 -0400
commit3480593131e0b781287dae0139bf7ccee7cba7ff (patch)
tree6e259a45b3767bd80b789814e4d484ee0ac069bf /net
parentd50bc1575096250aa37f17299c86ea548156efe8 (diff)
net: filter: get rid of BPF_S_* enum
This patch finally allows us to get rid of the BPF_S_* enum. Currently, the code performs unnecessary encode and decode workarounds in seccomp and filter migration itself when a filter is being attached in order to overcome BPF_S_* encoding which is not used anymore by the new interpreter resp. JIT compilers. Keeping it around would mean that also in future we would need to extend and maintain this enum and related encoders/decoders. We can get rid of all that and save us these operations during filter attaching. Naturally, also JIT compilers need to be updated by this. Before JIT conversion is being done, each compiler checks if A is being loaded at startup to obtain information if it needs to emit instructions to clear A first. Since BPF extensions are a subset of BPF_LD | BPF_{W,H,B} | BPF_ABS variants, case statements for extensions can be removed at that point. To ease and minimize code changes in the classic JITs, we have introduced bpf_anc_helper(). Tested with test_bpf on x86_64 (JIT, int), s390x (JIT, int), arm (JIT, int), i386 (int), ppc64 (JIT, int); for sparc we unfortunately didn't have access, but changes are analogous to the rest. Joint work with Alexei Starovoitov. Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: Alexei Starovoitov <ast@plumgrid.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Mircea Gherzan <mgherzan@gmail.com> Cc: Kees Cook <keescook@chromium.org> Acked-by: Chema Gonzalez <chemag@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/filter.c341
1 file changed, 125 insertions, 216 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 2c2d35d9d101..328aaf6ff4d1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -536,11 +536,13 @@ load_word:
536 * Output: 536 * Output:
537 * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness 537 * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
538 */ 538 */
539
539 ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); 540 ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
540 if (likely(ptr != NULL)) { 541 if (likely(ptr != NULL)) {
541 BPF_R0 = get_unaligned_be32(ptr); 542 BPF_R0 = get_unaligned_be32(ptr);
542 CONT; 543 CONT;
543 } 544 }
545
544 return 0; 546 return 0;
545 LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */ 547 LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
546 off = K; 548 off = K;
@@ -550,6 +552,7 @@ load_half:
550 BPF_R0 = get_unaligned_be16(ptr); 552 BPF_R0 = get_unaligned_be16(ptr);
551 CONT; 553 CONT;
552 } 554 }
555
553 return 0; 556 return 0;
554 LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */ 557 LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
555 off = K; 558 off = K;
@@ -559,6 +562,7 @@ load_byte:
559 BPF_R0 = *(u8 *)ptr; 562 BPF_R0 = *(u8 *)ptr;
560 CONT; 563 CONT;
561 } 564 }
565
562 return 0; 566 return 0;
563 LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */ 567 LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
564 off = K + X; 568 off = K + X;
@@ -1136,44 +1140,46 @@ err:
1136 */ 1140 */
1137static int check_load_and_stores(struct sock_filter *filter, int flen) 1141static int check_load_and_stores(struct sock_filter *filter, int flen)
1138{ 1142{
1139 u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ 1143 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
1140 int pc, ret = 0; 1144 int pc, ret = 0;
1141 1145
1142 BUILD_BUG_ON(BPF_MEMWORDS > 16); 1146 BUILD_BUG_ON(BPF_MEMWORDS > 16);
1147
1143 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); 1148 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
1144 if (!masks) 1149 if (!masks)
1145 return -ENOMEM; 1150 return -ENOMEM;
1151
1146 memset(masks, 0xff, flen * sizeof(*masks)); 1152 memset(masks, 0xff, flen * sizeof(*masks));
1147 1153
1148 for (pc = 0; pc < flen; pc++) { 1154 for (pc = 0; pc < flen; pc++) {
1149 memvalid &= masks[pc]; 1155 memvalid &= masks[pc];
1150 1156
1151 switch (filter[pc].code) { 1157 switch (filter[pc].code) {
1152 case BPF_S_ST: 1158 case BPF_ST:
1153 case BPF_S_STX: 1159 case BPF_STX:
1154 memvalid |= (1 << filter[pc].k); 1160 memvalid |= (1 << filter[pc].k);
1155 break; 1161 break;
1156 case BPF_S_LD_MEM: 1162 case BPF_LD | BPF_MEM:
1157 case BPF_S_LDX_MEM: 1163 case BPF_LDX | BPF_MEM:
1158 if (!(memvalid & (1 << filter[pc].k))) { 1164 if (!(memvalid & (1 << filter[pc].k))) {
1159 ret = -EINVAL; 1165 ret = -EINVAL;
1160 goto error; 1166 goto error;
1161 } 1167 }
1162 break; 1168 break;
1163 case BPF_S_JMP_JA: 1169 case BPF_JMP | BPF_JA:
1164 /* a jump must set masks on target */ 1170 /* A jump must set masks on target */
1165 masks[pc + 1 + filter[pc].k] &= memvalid; 1171 masks[pc + 1 + filter[pc].k] &= memvalid;
1166 memvalid = ~0; 1172 memvalid = ~0;
1167 break; 1173 break;
1168 case BPF_S_JMP_JEQ_K: 1174 case BPF_JMP | BPF_JEQ | BPF_K:
1169 case BPF_S_JMP_JEQ_X: 1175 case BPF_JMP | BPF_JEQ | BPF_X:
1170 case BPF_S_JMP_JGE_K: 1176 case BPF_JMP | BPF_JGE | BPF_K:
1171 case BPF_S_JMP_JGE_X: 1177 case BPF_JMP | BPF_JGE | BPF_X:
1172 case BPF_S_JMP_JGT_K: 1178 case BPF_JMP | BPF_JGT | BPF_K:
1173 case BPF_S_JMP_JGT_X: 1179 case BPF_JMP | BPF_JGT | BPF_X:
1174 case BPF_S_JMP_JSET_X: 1180 case BPF_JMP | BPF_JSET | BPF_K:
1175 case BPF_S_JMP_JSET_K: 1181 case BPF_JMP | BPF_JSET | BPF_X:
1176 /* a jump must set masks on targets */ 1182 /* A jump must set masks on targets */
1177 masks[pc + 1 + filter[pc].jt] &= memvalid; 1183 masks[pc + 1 + filter[pc].jt] &= memvalid;
1178 masks[pc + 1 + filter[pc].jf] &= memvalid; 1184 masks[pc + 1 + filter[pc].jf] &= memvalid;
1179 memvalid = ~0; 1185 memvalid = ~0;
@@ -1185,6 +1191,72 @@ error:
1185 return ret; 1191 return ret;
1186} 1192}
1187 1193
1194static bool chk_code_allowed(u16 code_to_probe)
1195{
1196 static const bool codes[] = {
1197 /* 32 bit ALU operations */
1198 [BPF_ALU | BPF_ADD | BPF_K] = true,
1199 [BPF_ALU | BPF_ADD | BPF_X] = true,
1200 [BPF_ALU | BPF_SUB | BPF_K] = true,
1201 [BPF_ALU | BPF_SUB | BPF_X] = true,
1202 [BPF_ALU | BPF_MUL | BPF_K] = true,
1203 [BPF_ALU | BPF_MUL | BPF_X] = true,
1204 [BPF_ALU | BPF_DIV | BPF_K] = true,
1205 [BPF_ALU | BPF_DIV | BPF_X] = true,
1206 [BPF_ALU | BPF_MOD | BPF_K] = true,
1207 [BPF_ALU | BPF_MOD | BPF_X] = true,
1208 [BPF_ALU | BPF_AND | BPF_K] = true,
1209 [BPF_ALU | BPF_AND | BPF_X] = true,
1210 [BPF_ALU | BPF_OR | BPF_K] = true,
1211 [BPF_ALU | BPF_OR | BPF_X] = true,
1212 [BPF_ALU | BPF_XOR | BPF_K] = true,
1213 [BPF_ALU | BPF_XOR | BPF_X] = true,
1214 [BPF_ALU | BPF_LSH | BPF_K] = true,
1215 [BPF_ALU | BPF_LSH | BPF_X] = true,
1216 [BPF_ALU | BPF_RSH | BPF_K] = true,
1217 [BPF_ALU | BPF_RSH | BPF_X] = true,
1218 [BPF_ALU | BPF_NEG] = true,
1219 /* Load instructions */
1220 [BPF_LD | BPF_W | BPF_ABS] = true,
1221 [BPF_LD | BPF_H | BPF_ABS] = true,
1222 [BPF_LD | BPF_B | BPF_ABS] = true,
1223 [BPF_LD | BPF_W | BPF_LEN] = true,
1224 [BPF_LD | BPF_W | BPF_IND] = true,
1225 [BPF_LD | BPF_H | BPF_IND] = true,
1226 [BPF_LD | BPF_B | BPF_IND] = true,
1227 [BPF_LD | BPF_IMM] = true,
1228 [BPF_LD | BPF_MEM] = true,
1229 [BPF_LDX | BPF_W | BPF_LEN] = true,
1230 [BPF_LDX | BPF_B | BPF_MSH] = true,
1231 [BPF_LDX | BPF_IMM] = true,
1232 [BPF_LDX | BPF_MEM] = true,
1233 /* Store instructions */
1234 [BPF_ST] = true,
1235 [BPF_STX] = true,
1236 /* Misc instructions */
1237 [BPF_MISC | BPF_TAX] = true,
1238 [BPF_MISC | BPF_TXA] = true,
1239 /* Return instructions */
1240 [BPF_RET | BPF_K] = true,
1241 [BPF_RET | BPF_A] = true,
1242 /* Jump instructions */
1243 [BPF_JMP | BPF_JA] = true,
1244 [BPF_JMP | BPF_JEQ | BPF_K] = true,
1245 [BPF_JMP | BPF_JEQ | BPF_X] = true,
1246 [BPF_JMP | BPF_JGE | BPF_K] = true,
1247 [BPF_JMP | BPF_JGE | BPF_X] = true,
1248 [BPF_JMP | BPF_JGT | BPF_K] = true,
1249 [BPF_JMP | BPF_JGT | BPF_X] = true,
1250 [BPF_JMP | BPF_JSET | BPF_K] = true,
1251 [BPF_JMP | BPF_JSET | BPF_X] = true,
1252 };
1253
1254 if (code_to_probe >= ARRAY_SIZE(codes))
1255 return false;
1256
1257 return codes[code_to_probe];
1258}
1259
1188/** 1260/**
1189 * sk_chk_filter - verify socket filter code 1261 * sk_chk_filter - verify socket filter code
1190 * @filter: filter to verify 1262 * @filter: filter to verify
@@ -1201,154 +1273,76 @@ error:
1201 */ 1273 */
1202int sk_chk_filter(struct sock_filter *filter, unsigned int flen) 1274int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
1203{ 1275{
1204 /*
1205 * Valid instructions are initialized to non-0.
1206 * Invalid instructions are initialized to 0.
1207 */
1208 static const u8 codes[] = {
1209 [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
1210 [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
1211 [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
1212 [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
1213 [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
1214 [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
1215 [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
1216 [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
1217 [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
1218 [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
1219 [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
1220 [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
1221 [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
1222 [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
1223 [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
1224 [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
1225 [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
1226 [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
1227 [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
1228 [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
1229 [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
1230 [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
1231 [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
1232 [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
1233 [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
1234 [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
1235 [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
1236 [BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
1237 [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
1238 [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
1239 [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
1240 [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
1241 [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
1242 [BPF_RET|BPF_K] = BPF_S_RET_K,
1243 [BPF_RET|BPF_A] = BPF_S_RET_A,
1244 [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
1245 [BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
1246 [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
1247 [BPF_ST] = BPF_S_ST,
1248 [BPF_STX] = BPF_S_STX,
1249 [BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
1250 [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
1251 [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
1252 [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
1253 [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
1254 [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
1255 [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
1256 [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
1257 [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
1258 };
1259 int pc;
1260 bool anc_found; 1276 bool anc_found;
1277 int pc;
1261 1278
1262 if (flen == 0 || flen > BPF_MAXINSNS) 1279 if (flen == 0 || flen > BPF_MAXINSNS)
1263 return -EINVAL; 1280 return -EINVAL;
1264 1281
1265 /* check the filter code now */ 1282 /* Check the filter code now */
1266 for (pc = 0; pc < flen; pc++) { 1283 for (pc = 0; pc < flen; pc++) {
1267 struct sock_filter *ftest = &filter[pc]; 1284 struct sock_filter *ftest = &filter[pc];
1268 u16 code = ftest->code;
1269 1285
1270 if (code >= ARRAY_SIZE(codes)) 1286 /* May we actually operate on this code? */
1271 return -EINVAL; 1287 if (!chk_code_allowed(ftest->code))
1272 code = codes[code];
1273 if (!code)
1274 return -EINVAL; 1288 return -EINVAL;
1289
1275 /* Some instructions need special checks */ 1290 /* Some instructions need special checks */
1276 switch (code) { 1291 switch (ftest->code) {
1277 case BPF_S_ALU_DIV_K: 1292 case BPF_ALU | BPF_DIV | BPF_K:
1278 case BPF_S_ALU_MOD_K: 1293 case BPF_ALU | BPF_MOD | BPF_K:
1279 /* check for division by zero */ 1294 /* Check for division by zero */
1280 if (ftest->k == 0) 1295 if (ftest->k == 0)
1281 return -EINVAL; 1296 return -EINVAL;
1282 break; 1297 break;
1283 case BPF_S_LD_MEM: 1298 case BPF_LD | BPF_MEM:
1284 case BPF_S_LDX_MEM: 1299 case BPF_LDX | BPF_MEM:
1285 case BPF_S_ST: 1300 case BPF_ST:
1286 case BPF_S_STX: 1301 case BPF_STX:
1287 /* check for invalid memory addresses */ 1302 /* Check for invalid memory addresses */
1288 if (ftest->k >= BPF_MEMWORDS) 1303 if (ftest->k >= BPF_MEMWORDS)
1289 return -EINVAL; 1304 return -EINVAL;
1290 break; 1305 break;
1291 case BPF_S_JMP_JA: 1306 case BPF_JMP | BPF_JA:
1292 /* 1307 /* Note, the large ftest->k might cause loops.
1293 * Note, the large ftest->k might cause loops.
1294 * Compare this with conditional jumps below, 1308 * Compare this with conditional jumps below,
1295 * where offsets are limited. --ANK (981016) 1309 * where offsets are limited. --ANK (981016)
1296 */ 1310 */
1297 if (ftest->k >= (unsigned int)(flen-pc-1)) 1311 if (ftest->k >= (unsigned int)(flen - pc - 1))
1298 return -EINVAL; 1312 return -EINVAL;
1299 break; 1313 break;
1300 case BPF_S_JMP_JEQ_K: 1314 case BPF_JMP | BPF_JEQ | BPF_K:
1301 case BPF_S_JMP_JEQ_X: 1315 case BPF_JMP | BPF_JEQ | BPF_X:
1302 case BPF_S_JMP_JGE_K: 1316 case BPF_JMP | BPF_JGE | BPF_K:
1303 case BPF_S_JMP_JGE_X: 1317 case BPF_JMP | BPF_JGE | BPF_X:
1304 case BPF_S_JMP_JGT_K: 1318 case BPF_JMP | BPF_JGT | BPF_K:
1305 case BPF_S_JMP_JGT_X: 1319 case BPF_JMP | BPF_JGT | BPF_X:
1306 case BPF_S_JMP_JSET_X: 1320 case BPF_JMP | BPF_JSET | BPF_K:
1307 case BPF_S_JMP_JSET_K: 1321 case BPF_JMP | BPF_JSET | BPF_X:
1308 /* for conditionals both must be safe */ 1322 /* Both conditionals must be safe */
1309 if (pc + ftest->jt + 1 >= flen || 1323 if (pc + ftest->jt + 1 >= flen ||
1310 pc + ftest->jf + 1 >= flen) 1324 pc + ftest->jf + 1 >= flen)
1311 return -EINVAL; 1325 return -EINVAL;
1312 break; 1326 break;
1313 case BPF_S_LD_W_ABS: 1327 case BPF_LD | BPF_W | BPF_ABS:
1314 case BPF_S_LD_H_ABS: 1328 case BPF_LD | BPF_H | BPF_ABS:
1315 case BPF_S_LD_B_ABS: 1329 case BPF_LD | BPF_B | BPF_ABS:
1316 anc_found = false; 1330 anc_found = false;
1317#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ 1331 if (bpf_anc_helper(ftest) & BPF_ANC)
1318 code = BPF_S_ANC_##CODE; \ 1332 anc_found = true;
1319 anc_found = true; \ 1333 /* Ancillary operation unknown or unsupported */
1320 break
1321 switch (ftest->k) {
1322 ANCILLARY(PROTOCOL);
1323 ANCILLARY(PKTTYPE);
1324 ANCILLARY(IFINDEX);
1325 ANCILLARY(NLATTR);
1326 ANCILLARY(NLATTR_NEST);
1327 ANCILLARY(MARK);
1328 ANCILLARY(QUEUE);
1329 ANCILLARY(HATYPE);
1330 ANCILLARY(RXHASH);
1331 ANCILLARY(CPU);
1332 ANCILLARY(ALU_XOR_X);
1333 ANCILLARY(VLAN_TAG);
1334 ANCILLARY(VLAN_TAG_PRESENT);
1335 ANCILLARY(PAY_OFFSET);
1336 ANCILLARY(RANDOM);
1337 }
1338
1339 /* ancillary operation unknown or unsupported */
1340 if (anc_found == false && ftest->k >= SKF_AD_OFF) 1334 if (anc_found == false && ftest->k >= SKF_AD_OFF)
1341 return -EINVAL; 1335 return -EINVAL;
1342 } 1336 }
1343 ftest->code = code;
1344 } 1337 }
1345 1338
1346 /* last instruction must be a RET code */ 1339 /* Last instruction must be a RET code */
1347 switch (filter[flen - 1].code) { 1340 switch (filter[flen - 1].code) {
1348 case BPF_S_RET_K: 1341 case BPF_RET | BPF_K:
1349 case BPF_S_RET_A: 1342 case BPF_RET | BPF_A:
1350 return check_load_and_stores(filter, flen); 1343 return check_load_and_stores(filter, flen);
1351 } 1344 }
1345
1352 return -EINVAL; 1346 return -EINVAL;
1353} 1347}
1354EXPORT_SYMBOL(sk_chk_filter); 1348EXPORT_SYMBOL(sk_chk_filter);
@@ -1448,7 +1442,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1448{ 1442{
1449 struct sock_filter *old_prog; 1443 struct sock_filter *old_prog;
1450 struct sk_filter *old_fp; 1444 struct sk_filter *old_fp;
1451 int i, err, new_len, old_len = fp->len; 1445 int err, new_len, old_len = fp->len;
1452 1446
1453 /* We are free to overwrite insns et al right here as it 1447 /* We are free to overwrite insns et al right here as it
1454 * won't be used at this point in time anymore internally 1448 * won't be used at this point in time anymore internally
@@ -1458,13 +1452,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1458 BUILD_BUG_ON(sizeof(struct sock_filter) != 1452 BUILD_BUG_ON(sizeof(struct sock_filter) !=
1459 sizeof(struct sock_filter_int)); 1453 sizeof(struct sock_filter_int));
1460 1454
1461 /* For now, we need to unfiddle BPF_S_* identifiers in place.
1462 * This can sooner or later on be subject to removal, e.g. when
1463 * JITs have been converted.
1464 */
1465 for (i = 0; i < fp->len; i++)
1466 sk_decode_filter(&fp->insns[i], &fp->insns[i]);
1467
1468 /* Conversion cannot happen on overlapping memory areas, 1455 /* Conversion cannot happen on overlapping memory areas,
1469 * so we need to keep the user BPF around until the 2nd 1456 * so we need to keep the user BPF around until the 2nd
1470 * pass. At this time, the user BPF is stored in fp->insns. 1457 * pass. At this time, the user BPF is stored in fp->insns.
@@ -1706,84 +1693,6 @@ int sk_detach_filter(struct sock *sk)
1706} 1693}
1707EXPORT_SYMBOL_GPL(sk_detach_filter); 1694EXPORT_SYMBOL_GPL(sk_detach_filter);
1708 1695
1709void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
1710{
1711 static const u16 decodes[] = {
1712 [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
1713 [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
1714 [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
1715 [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
1716 [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
1717 [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
1718 [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
1719 [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
1720 [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
1721 [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
1722 [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
1723 [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
1724 [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
1725 [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
1726 [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
1727 [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
1728 [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
1729 [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
1730 [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
1731 [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
1732 [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
1733 [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
1734 [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
1735 [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
1736 [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
1737 [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
1738 [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
1739 [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
1740 [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
1741 [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
1742 [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
1743 [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
1744 [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
1745 [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
1746 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
1747 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
1748 [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
1749 [BPF_S_ANC_RANDOM] = BPF_LD|BPF_B|BPF_ABS,
1750 [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
1751 [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
1752 [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
1753 [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
1754 [BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
1755 [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
1756 [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
1757 [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
1758 [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
1759 [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
1760 [BPF_S_RET_K] = BPF_RET|BPF_K,
1761 [BPF_S_RET_A] = BPF_RET|BPF_A,
1762 [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
1763 [BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
1764 [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
1765 [BPF_S_ST] = BPF_ST,
1766 [BPF_S_STX] = BPF_STX,
1767 [BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
1768 [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
1769 [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
1770 [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
1771 [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
1772 [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
1773 [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
1774 [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
1775 [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
1776 };
1777 u16 code;
1778
1779 code = filt->code;
1780
1781 to->code = decodes[code];
1782 to->jt = filt->jt;
1783 to->jf = filt->jf;
1784 to->k = filt->k;
1785}
1786
1787int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, 1696int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
1788 unsigned int len) 1697 unsigned int len)
1789{ 1698{