author	Eric Dumazet <eric.dumazet@gmail.com>	2010-12-01 15:46:24 -0500
committer	David S. Miller <davem@davemloft.net>	2010-12-06 15:59:09 -0500
commit	2d5311e4e8272fd398fc1cf278f12fd6dee4074b (patch)
tree	d86fcab7044baa6ec41ef87b8eca186ae6f9ea89 /net/core
parent	ae9c416d686db74f67d73c1bebf1e3a7e8b3c5b5 (diff)
filter: add a security check at install time
We added some security checks in commit 57fe93b374a6 (filter: make sure filters don't read uninitialized memory) to close a potential leak of kernel information to user space. This added an extra cost at run time, while we can instead check the filter itself at install time, to make sure a malicious user doesn't try to abuse us.

This patch adds a check_load_and_stores() function, whose unique purpose is to perform this check, allocating a temporary array of masks. We scan the filter and propagate bitmask information, telling us whether a load M(K) is allowed because a previous store M(K) is guaranteed on every path reaching it. (So that sk_run_filter() can no longer read uninitialized memory.)

Note: this can uncover application bugs, denying a filter attach that was previously allowed.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Dan Rosenberg <drosenberg@vsecurity.com>
Cc: Changli Gao <xiaosuo@gmail.com>
Acked-by: Changli Gao <xiaosuo@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
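To make the behavioral change concrete, here is a minimal user-space sketch (my illustration, not part of the commit) of a filter that reads scratch cell M[0] without ever writing it. Before this patch the kernel accepted such a filter and sk_run_filter() substituted zero for the uninitialized read; after this patch, SO_ATTACH_FILTER should refuse it with EINVAL at install time:

/* Hypothetical illustration: attach a classic BPF filter that loads
 * M[0] before any store.  Expected to fail with EINVAL once the
 * install-time check is in place. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/filter.h>

int main(void)
{
	struct sock_filter insns[] = {
		/* A = M[0] -- scratch cell never stored to */
		BPF_STMT(BPF_LD | BPF_MEM, 0),
		/* return A */
		BPF_STMT(BPF_RET | BPF_A, 0),
	};
	struct sock_fprog prog = {
		.len    = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
		       &prog, sizeof(prog)) < 0)
		printf("attach refused: %s\n", strerror(errno));
	else
		printf("attach accepted\n");
	return 0;
}

An application relying on the old implicit-zero behavior would see exactly this refusal, which is the "can uncover application bugs" caveat above.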
Diffstat (limited to 'net/core')
 net/core/filter.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 63 insertions(+), 9 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 054e286861d2..ac4920a87be5 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -166,11 +166,9 @@ unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry
 	u32 A = 0;			/* Accumulator */
 	u32 X = 0;			/* Index Register */
 	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
-	unsigned long memvalid = 0;
 	u32 tmp;
 	int k;
 
-	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
 	/*
 	 * Process array of filter instructions.
 	 */
@@ -318,12 +316,10 @@ load_b:
 			X = K;
 			continue;
 		case BPF_S_LD_MEM:
-			A = (memvalid & (1UL << K)) ?
-				mem[K] : 0;
+			A = mem[K];
 			continue;
 		case BPF_S_LDX_MEM:
-			X = (memvalid & (1UL << K)) ?
-				mem[K] : 0;
+			X = mem[K];
 			continue;
 		case BPF_S_MISC_TAX:
 			X = A;
@@ -336,11 +332,9 @@ load_b:
 		case BPF_S_RET_A:
 			return A;
 		case BPF_S_ST:
-			memvalid |= 1UL << K;
 			mem[K] = A;
 			continue;
 		case BPF_S_STX:
-			memvalid |= 1UL << K;
 			mem[K] = X;
 			continue;
 		default:
@@ -425,6 +419,66 @@ load_b:
 }
 EXPORT_SYMBOL(sk_run_filter);
 
+/*
+ * Security:
+ * A BPF program is able to use 16 cells of memory to store intermediate
+ * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
+ * As we don't want to clear the mem[] array for each packet going through
+ * sk_run_filter(), we check that a filter loaded by a user never tries to
+ * read a cell that was not previously written, and we check all branches
+ * to be sure a malicious user doesn't try to abuse us.
+ */
+static int check_load_and_stores(struct sock_filter *filter, int flen)
+{
+	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
+	int pc, ret = 0;
+
+	BUILD_BUG_ON(BPF_MEMWORDS > 16);
+	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		return -ENOMEM;
+	memset(masks, 0xff, flen * sizeof(*masks));
+
+	for (pc = 0; pc < flen; pc++) {
+		memvalid &= masks[pc];
+
+		switch (filter[pc].code) {
+		case BPF_S_ST:
+		case BPF_S_STX:
+			memvalid |= (1 << filter[pc].k);
+			break;
+		case BPF_S_LD_MEM:
+		case BPF_S_LDX_MEM:
+			if (!(memvalid & (1 << filter[pc].k))) {
+				ret = -EINVAL;
+				goto error;
+			}
+			break;
+		case BPF_S_JMP_JA:
+			/* a jump must set masks on target */
+			masks[pc + 1 + filter[pc].k] &= memvalid;
+			memvalid = ~0;
+			break;
+		case BPF_S_JMP_JEQ_K:
+		case BPF_S_JMP_JEQ_X:
+		case BPF_S_JMP_JGE_K:
+		case BPF_S_JMP_JGE_X:
+		case BPF_S_JMP_JGT_K:
+		case BPF_S_JMP_JGT_X:
+		case BPF_S_JMP_JSET_X:
+		case BPF_S_JMP_JSET_K:
+			/* a jump must set masks on targets */
+			masks[pc + 1 + filter[pc].jt] &= memvalid;
+			masks[pc + 1 + filter[pc].jf] &= memvalid;
+			memvalid = ~0;
+			break;
+		}
+	}
+error:
+	kfree(masks);
+	return ret;
+}
+
 /**
  * sk_chk_filter - verify socket filter code
  * @filter: filter to verify
@@ -553,7 +607,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 	switch (filter[flen - 1].code) {
 	case BPF_S_RET_K:
 	case BPF_S_RET_A:
-		return 0;
+		return check_load_and_stores(filter, flen);
 	}
 	return -EINVAL;
 }
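To see how the mask propagation works, here is a hypothetical user-space model of the same analysis (simplified opcode set and naming are mine, not the kernel's). masks[pc] starts all-ones; each jump ANDs the currently valid cell set into both of its targets, so a cell counts as valid at a given pc only if every path reaching that pc has stored to it:

/* Hypothetical model of check_load_and_stores(), not the kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

enum op { ST, LD_MEM, JEQ, RET };
struct insn { enum op op; uint32_t k; uint8_t jt, jf; };

static int check(const struct insn *f, int flen)
{
	uint16_t masks[64], memvalid = 0;
	int pc;

	memset(masks, 0xff, sizeof(masks));
	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];	/* merge constraints from incoming edges */
		switch (f[pc].op) {
		case ST:
			memvalid |= 1 << f[pc].k;
			break;
		case LD_MEM:
			if (!(memvalid & (1 << f[pc].k)))
				return -1;	/* read not preceded by a store on all paths */
			break;
		case JEQ:
			masks[pc + 1 + f[pc].jt] &= memvalid;
			masks[pc + 1 + f[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		case RET:
			break;
		}
	}
	return 0;
}

int main(void)
{
	/* M[0] is stored only on the jt path; the jf edge goes straight
	 * to the load, so the filter must be rejected. */
	struct insn f[] = {
		{ JEQ, 0, 0, 1 },	/* jt -> pc 1 (ST), jf -> pc 2 (LD) */
		{ ST, 0, 0, 0 },	/* M[0] = A */
		{ LD_MEM, 0, 0, 0 },	/* A = M[0]: invalid via jf path */
		{ RET, 0, 0, 0 },
	};
	printf("%s\n", check(f, 4) ? "rejected" : "accepted");
	return 0;
}

Straight-line code simply carries memvalid forward, since every mask starts all-ones; only jump edges can tighten a target's mask. That is why the single pass suffices for classic BPF: branches only go forward, so by the time the scan reaches a pc, every edge into it has already contributed its constraint.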