Diffstat (limited to 'kernel/seccomp.c')
 kernel/seccomp.c | 121
 1 file changed, 59 insertions(+), 62 deletions(-)
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index eda2da3df822..d8d046c0726a 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -55,60 +55,33 @@ struct seccomp_filter {
 	atomic_t usage;
 	struct seccomp_filter *prev;
 	unsigned short len;  /* Instruction count */
-	struct sock_filter insns[];
+	struct sock_filter_int insnsi[];
 };
 
 /* Limit any path through the tree to 256KB worth of instructions. */
 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
 
-/**
- * get_u32 - returns a u32 offset into data
- * @data: a unsigned 64 bit value
- * @index: 0 or 1 to return the first or second 32-bits
- *
- * This inline exists to hide the length of unsigned long.  If a 32-bit
- * unsigned long is passed in, it will be extended and the top 32-bits will be
- * 0.  If it is a 64-bit unsigned long, then whatever data is resident will be
- * properly returned.
- *
+/*
  * Endianness is explicitly ignored and left for BPF program authors to manage
  * as per the specific architecture.
  */
-static inline u32 get_u32(u64 data, int index)
-{
-	return ((u32 *)&data)[index];
-}
-
-/* Helper for bpf_load below. */
-#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
-/**
- * bpf_load: checks and returns a pointer to the requested offset
- * @off: offset into struct seccomp_data to load from
- *
- * Returns the requested 32-bits of data.
- * seccomp_check_filter() should assure that @off is 32-bit aligned
- * and not out of bounds.  Failure to do so is a BUG.
- */
-u32 seccomp_bpf_load(int off)
+static void populate_seccomp_data(struct seccomp_data *sd)
 {
-	struct pt_regs *regs = task_pt_regs(current);
-	if (off == BPF_DATA(nr))
-		return syscall_get_nr(current, regs);
-	if (off == BPF_DATA(arch))
-		return syscall_get_arch();
-	if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
-		unsigned long value;
-		int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
-		int index = !!(off % sizeof(u64));
-		syscall_get_arguments(current, regs, arg, 1, &value);
-		return get_u32(value, index);
-	}
-	if (off == BPF_DATA(instruction_pointer))
-		return get_u32(KSTK_EIP(current), 0);
-	if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
-		return get_u32(KSTK_EIP(current), 1);
-	/* seccomp_check_filter should make this impossible. */
-	BUG();
+	struct task_struct *task = current;
+	struct pt_regs *regs = task_pt_regs(task);
+
+	sd->nr = syscall_get_nr(task, regs);
+	sd->arch = syscall_get_arch();
+
+	/* Unroll syscall_get_args to help gcc on arm. */
+	syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
+	syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]);
+	syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]);
+	syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]);
+	syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]);
+	syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);
+
+	sd->instruction_pointer = KSTK_EIP(task);
 }
 
 /**
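
For orientation: populate_seccomp_data() snapshots the syscall state once per entry, and filter programs then read struct seccomp_data fields by fixed offset. A minimal userspace sketch of such a classic-BPF program, assuming the uapi layout from <linux/seccomp.h> (a production filter would also check sd->arch first):

    #include <stddef.h>
    #include <errno.h>
    #include <sys/syscall.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    /* Allow read(2); fail every other syscall with ENOSYS. */
    static struct sock_filter example[] = {
    	/* A <- seccomp_data.nr (the snapshotted syscall number) */
    	BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
    		 offsetof(struct seccomp_data, nr)),
    	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_read, 0, 1),
    	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
    	BPF_STMT(BPF_RET | BPF_K,
    		 SECCOMP_RET_ERRNO | (ENOSYS & SECCOMP_RET_DATA)),
    };
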
@@ -133,17 +106,17 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
 
 	switch (code) {
 	case BPF_S_LD_W_ABS:
-		ftest->code = BPF_S_ANC_SECCOMP_LD_W;
+		ftest->code = BPF_LDX | BPF_W | BPF_ABS;
 		/* 32-bit aligned and not out of bounds. */
 		if (k >= sizeof(struct seccomp_data) || k & 3)
 			return -EINVAL;
 		continue;
 	case BPF_S_LD_W_LEN:
-		ftest->code = BPF_S_LD_IMM;
+		ftest->code = BPF_LD | BPF_IMM;
 		ftest->k = sizeof(struct seccomp_data);
 		continue;
 	case BPF_S_LDX_W_LEN:
-		ftest->code = BPF_S_LDX_IMM;
+		ftest->code = BPF_LDX | BPF_IMM;
 		ftest->k = sizeof(struct seccomp_data);
 		continue;
 	/* Explicitly include allowed calls. */
@@ -185,6 +158,7 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
 	case BPF_S_JMP_JGT_X:
 	case BPF_S_JMP_JSET_K:
 	case BPF_S_JMP_JSET_X:
+		sk_decode_filter(ftest, ftest);
 		continue;
 	default:
 		return -EINVAL;
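
A note on the constants being swapped in: the BPF_S_* values are the kernel's internally decoded opcodes, while compositions like BPF_LD | BPF_W | BPF_ABS are the raw classic-BPF encodings that sk_convert_filter() consumes, which is also why the jump cases above now pass through sk_decode_filter(). How those raw encodings compose, as a compile-time check (constants from <linux/filter.h>):

    #include <linux/filter.h>

    /* A classic-BPF opcode ORs together a class, a size and a mode, e.g.
     * BPF_LD (0x00) | BPF_W (0x00) | BPF_ABS (0x20) == 0x20 means "load a
     * 32-bit word from an absolute offset into the accumulator". */
    _Static_assert((BPF_LD | BPF_W | BPF_ABS) == 0x20, "ld w abs");
    _Static_assert((BPF_LD | BPF_IMM) == 0x00, "ld imm");
    _Static_assert((BPF_LDX | BPF_IMM) == 0x01, "ldx imm");
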
@@ -202,18 +176,21 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
 static u32 seccomp_run_filters(int syscall)
 {
 	struct seccomp_filter *f;
+	struct seccomp_data sd;
 	u32 ret = SECCOMP_RET_ALLOW;
 
 	/* Ensure unexpected behavior doesn't result in failing open. */
 	if (WARN_ON(current->seccomp.filter == NULL))
 		return SECCOMP_RET_KILL;
 
+	populate_seccomp_data(&sd);
+
 	/*
 	 * All filters in the list are evaluated and the lowest BPF return
 	 * value always takes priority (ignoring the DATA).
 	 */
 	for (f = current->seccomp.filter; f; f = f->prev) {
-		u32 cur_ret = sk_run_filter(NULL, f->insns);
+		u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi);
 		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
 			ret = cur_ret;
 	}
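
The "lowest return value wins" reduction works because the action codes are ordered so that lower means more restrictive: SECCOMP_RET_KILL (0x00000000) < SECCOMP_RET_TRAP (0x00030000) < SECCOMP_RET_ERRNO (0x00050000) < SECCOMP_RET_TRACE (0x7ff00000) < SECCOMP_RET_ALLOW (0x7fff0000), with the low 16 DATA bits masked off before comparing. A small userspace illustration, assuming the uapi constants in <linux/seccomp.h>:

    #include <stdint.h>
    #include <linux/seccomp.h>

    /* Pick the more restrictive of two filter results, ignoring the
     * per-filter DATA bits, exactly as the loop above does. */
    static uint32_t stricter(uint32_t a, uint32_t b)
    {
    	return (a & SECCOMP_RET_ACTION) < (b & SECCOMP_RET_ACTION) ? a : b;
    }
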
@@ -231,6 +208,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 	struct seccomp_filter *filter;
 	unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
 	unsigned long total_insns = fprog->len;
+	struct sock_filter *fp;
+	int new_len;
 	long ret;
 
 	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
@@ -252,28 +231,43 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 				     CAP_SYS_ADMIN) != 0)
 		return -EACCES;
 
-	/* Allocate a new seccomp_filter */
-	filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
-			 GFP_KERNEL|__GFP_NOWARN);
-	if (!filter)
+	fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
+	if (!fp)
 		return -ENOMEM;
-	atomic_set(&filter->usage, 1);
-	filter->len = fprog->len;
 
 	/* Copy the instructions from fprog. */
 	ret = -EFAULT;
-	if (copy_from_user(filter->insns, fprog->filter, fp_size))
-		goto fail;
+	if (copy_from_user(fp, fprog->filter, fp_size))
+		goto free_prog;
 
 	/* Check and rewrite the fprog via the skb checker */
-	ret = sk_chk_filter(filter->insns, filter->len);
+	ret = sk_chk_filter(fp, fprog->len);
 	if (ret)
-		goto fail;
+		goto free_prog;
 
 	/* Check and rewrite the fprog for seccomp use */
-	ret = seccomp_check_filter(filter->insns, filter->len);
+	ret = seccomp_check_filter(fp, fprog->len);
+	if (ret)
+		goto free_prog;
+
+	/* Convert 'sock_filter' insns to 'sock_filter_int' insns */
+	ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
+	if (ret)
+		goto free_prog;
+
+	/* Allocate a new seccomp_filter */
+	filter = kzalloc(sizeof(struct seccomp_filter) +
+			 sizeof(struct sock_filter_int) * new_len,
+			 GFP_KERNEL|__GFP_NOWARN);
+	if (!filter)
+		goto free_prog;
+
+	ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len);
 	if (ret)
-		goto fail;
+		goto free_filter;
+
+	atomic_set(&filter->usage, 1);
+	filter->len = new_len;
 
 	/*
 	 * If there is an existing filter, make it the prev and don't drop its
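
The doubled sk_convert_filter() call above is the usual two-pass sizing idiom: a first pass with a NULL destination only computes new_len, the caller allocates, and a second pass emits the converted instructions. The shape of that idiom in isolation, with a hypothetical convert() standing in for sk_convert_filter():

    #include <errno.h>
    #include <stdlib.h>

    /* Hypothetical stand-in: writes converted entries to out, or, when
     * out is NULL, only stores the required count in *out_len. */
    int convert(const int *in, int in_len, int *out, int *out_len);

    int convert_alloc(const int *in, int in_len, int **out, int *out_len)
    {
    	int err = convert(in, in_len, NULL, out_len);	/* sizing pass */
    	if (err)
    		return err;
    	*out = calloc(*out_len, sizeof(**out));
    	if (!*out)
    		return -ENOMEM;
    	err = convert(in, in_len, *out, out_len);	/* emitting pass */
    	if (err)
    		free(*out);
    	return err;
    }
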
@@ -282,8 +276,11 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 	filter->prev = current->seccomp.filter;
 	current->seccomp.filter = filter;
 	return 0;
-fail:
+
+free_filter:
 	kfree(filter);
+free_prog:
+	kfree(fp);
 	return ret;
 }
 
@@ -293,7 +290,7 @@ fail:
  *
  * Returns 0 on success and non-zero otherwise.
  */
-long seccomp_attach_user_filter(char __user *user_filter)
+static long seccomp_attach_user_filter(char __user *user_filter)
{
 	struct sock_fprog fprog;
 	long ret = -EFAULT;
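
Finally, the user-side view of seccomp_attach_user_filter(): a process hands the kernel a struct sock_fprog via prctl(). A minimal sketch, assuming a 3.x-era uapi; the always-allow program is just a placeholder:

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/prctl.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    int main(void)
    {
    	struct sock_filter insns[] = {
    		/* Load the syscall number, then allow everything. */
    		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
    			 offsetof(struct seccomp_data, nr)),
    		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
    	};
    	struct sock_fprog prog = {
    		.len = sizeof(insns) / sizeof(insns[0]),
    		.filter = insns,
    	};

    	/* Required unless the caller holds CAP_SYS_ADMIN (the -EACCES
    	 * check in seccomp_attach_filter above). */
    	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
    		return 1;
    	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
    		return 1;
    	puts("filter installed");
    	return 0;
    }
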