Diffstat (limited to 'security/yama/yama_lsm.c')
 -rw-r--r--  security/yama/yama_lsm.c | 100
 1 file changed, 71 insertions(+), 29 deletions(-)
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index b4c29848b49d..23414b93771f 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -17,6 +17,7 @@
 #include <linux/ptrace.h>
 #include <linux/prctl.h>
 #include <linux/ratelimit.h>
+#include <linux/workqueue.h>
 
 #define YAMA_SCOPE_DISABLED	0
 #define YAMA_SCOPE_RELATIONAL	1
@@ -29,12 +30,37 @@ static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
 struct ptrace_relation {
 	struct task_struct *tracer;
 	struct task_struct *tracee;
+	bool invalid;
 	struct list_head node;
+	struct rcu_head rcu;
 };
 
 static LIST_HEAD(ptracer_relations);
 static DEFINE_SPINLOCK(ptracer_relations_lock);
 
+static void yama_relation_cleanup(struct work_struct *work);
+static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
+
+/**
+ * yama_relation_cleanup - remove invalid entries from the relation list
+ *
+ */
+static void yama_relation_cleanup(struct work_struct *work)
+{
+	struct ptrace_relation *relation;
+
+	spin_lock(&ptracer_relations_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid) {
+			list_del_rcu(&relation->node);
+			kfree_rcu(relation, rcu);
+		}
+	}
+	rcu_read_unlock();
+	spin_unlock(&ptracer_relations_lock);
+}
+
 /**
  * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
  * @tracer: the task_struct of the process doing the ptrace
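
The hunk above is the reaper half of a mark-and-reap scheme: deletion paths only flag an entry as invalid, and this work item later takes the writer lock, unlinks flagged entries with list_del_rcu(), and hands them to kfree_rcu() so concurrent RCU readers never touch freed memory. A minimal sketch of the same pattern, with illustrative names that are not Yama's:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_entry {
	int key;
	bool invalid;			/* logically deleted, not yet freed */
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* serializes list writers only */

/* Reaper: runs from the workqueue, so it may take the writer lock. */
static void demo_reap(struct work_struct *work)
{
	struct demo_entry *e;

	spin_lock(&demo_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_list, node) {
		if (e->invalid) {
			list_del_rcu(&e->node);	/* hide from new readers */
			kfree_rcu(e, rcu);	/* free after a grace period */
		}
	}
	rcu_read_unlock();
	spin_unlock(&demo_lock);
}
static DECLARE_WORK(demo_reap_work, demo_reap);

/* "Delete": callable where contending on demo_lock would be unwelcome;
 * it only marks entries and defers the real removal to the work item. */
static void demo_mark_dead(int key)
{
	struct demo_entry *e;
	bool marked = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_list, node) {
		if (e->key == key) {
			e->invalid = true;
			marked = true;
		}
	}
	rcu_read_unlock();

	if (marked)
		schedule_work(&demo_reap_work);
}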
@@ -48,32 +74,34 @@ static DEFINE_SPINLOCK(ptracer_relations_lock);
 static int yama_ptracer_add(struct task_struct *tracer,
 			    struct task_struct *tracee)
 {
-	int rc = 0;
-	struct ptrace_relation *added;
-	struct ptrace_relation *entry, *relation = NULL;
+	struct ptrace_relation *relation, *added;
 
 	added = kmalloc(sizeof(*added), GFP_KERNEL);
 	if (!added)
 		return -ENOMEM;
 
-	spin_lock_bh(&ptracer_relations_lock);
-	list_for_each_entry(entry, &ptracer_relations, node)
-		if (entry->tracee == tracee) {
-			relation = entry;
-			break;
+	added->tracee = tracee;
+	added->tracer = tracer;
+	added->invalid = false;
+
+	spin_lock(&ptracer_relations_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
+		if (relation->tracee == tracee) {
+			list_replace_rcu(&relation->node, &added->node);
+			kfree_rcu(relation, rcu);
+			goto out;
 		}
-	if (!relation) {
-		relation = added;
-		relation->tracee = tracee;
-		list_add(&relation->node, &ptracer_relations);
 	}
-	relation->tracer = tracer;
 
-	spin_unlock_bh(&ptracer_relations_lock);
-	if (added != relation)
-		kfree(added);
+	list_add_rcu(&added->node, &ptracer_relations);
 
-	return rc;
+out:
+	rcu_read_unlock();
+	spin_unlock(&ptracer_relations_lock);
+	return 0;
 }
 
 /**
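
For context, the exception list that yama_ptracer_add() manages is populated from userspace through prctl(PR_SET_PTRACER): a process nominates one tracer (or PR_SET_PTRACER_ANY) that may attach to it under YAMA_SCOPE_RELATIONAL. A rough userspace illustration; the choice of getppid() and the minimal error handling are only for the example:

#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER	0x59616d61	/* "Yama"; from linux/prctl.h */
#endif

int main(void)
{
	/* Allow our parent (e.g. a crash handler or debugger wrapper)
	 * to ptrace this process even with ptrace_scope = 1. */
	if (prctl(PR_SET_PTRACER, (unsigned long)getppid(), 0, 0, 0) != 0)
		perror("PR_SET_PTRACER");

	pause();	/* stay alive so the nominated tracer can attach */
	return 0;
}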
@@ -84,16 +112,23 @@ static int yama_ptracer_add(struct task_struct *tracer,
 static void yama_ptracer_del(struct task_struct *tracer,
 			     struct task_struct *tracee)
 {
-	struct ptrace_relation *relation, *safe;
+	struct ptrace_relation *relation;
+	bool marked = false;
 
-	spin_lock_bh(&ptracer_relations_lock);
-	list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
 		if (relation->tracee == tracee ||
 		    (tracer && relation->tracer == tracer)) {
-			list_del(&relation->node);
-			kfree(relation);
+			relation->invalid = true;
+			marked = true;
 		}
-	spin_unlock_bh(&ptracer_relations_lock);
+	}
+	rcu_read_unlock();
+
+	if (marked)
+		schedule_work(&yama_relation_work);
 }
 
 /**
@@ -217,21 +252,22 @@ static int ptracer_exception_found(struct task_struct *tracer,
 	struct task_struct *parent = NULL;
 	bool found = false;
 
-	spin_lock_bh(&ptracer_relations_lock);
 	rcu_read_lock();
 	if (!thread_group_leader(tracee))
 		tracee = rcu_dereference(tracee->group_leader);
-	list_for_each_entry(relation, &ptracer_relations, node)
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
 		if (relation->tracee == tracee) {
 			parent = relation->tracer;
 			found = true;
 			break;
 		}
+	}
 
 	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
 		rc = 1;
 	rcu_read_unlock();
-	spin_unlock_bh(&ptracer_relations_lock);
 
 	return rc;
 }
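
With writers serialized by the spinlock and reclamation deferred to the work item, lookup paths like ptracer_exception_found() can now walk the list under rcu_read_lock() alone. Reduced to the demo types from the earlier sketch, the reader side looks roughly like:

/* Reuses struct demo_entry and demo_list from the sketch further up. */
static bool demo_lookup(int key)
{
	struct demo_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_list, node) {
		if (e->invalid)		/* marked dead, not yet reaped */
			continue;
		if (e->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}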
@@ -262,14 +298,18 @@ int yama_ptrace_access_check(struct task_struct *child,
 		/* No additional restrictions. */
 		break;
 	case YAMA_SCOPE_RELATIONAL:
+		rcu_read_lock();
 		if (!task_is_descendant(current, child) &&
 		    !ptracer_exception_found(current, child) &&
-		    !ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
+		    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
 			rc = -EPERM;
+		rcu_read_unlock();
 		break;
 	case YAMA_SCOPE_CAPABILITY:
-		if (!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
+		rcu_read_lock();
+		if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
 			rc = -EPERM;
+		rcu_read_unlock();
 		break;
 	case YAMA_SCOPE_NO_ATTACH:
 	default:
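
The switch from task_user_ns(child) to an explicit __task_cred(child)->user_ns dereference is what brings in the new rcu_read_lock()/rcu_read_unlock() pairs: __task_cred() reads another task's RCU-protected credentials and is only valid inside an RCU read-side critical section. The bare idiom, wrapped in a hypothetical helper just for illustration:

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/sched.h>

/* Invented helper: may the caller ptrace tasks in @target's user namespace?
 * __task_cred() must be called with the RCU read lock held. */
static bool demo_capable_over(struct task_struct *target)
{
	bool ok;

	rcu_read_lock();
	ok = ns_capable(__task_cred(target)->user_ns, CAP_SYS_PTRACE);
	rcu_read_unlock();

	return ok;
}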
@@ -307,8 +347,10 @@ int yama_ptrace_traceme(struct task_struct *parent)
 	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
 	switch (ptrace_scope) {
 	case YAMA_SCOPE_CAPABILITY:
-		if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
+		rcu_read_lock();
+		if (!ns_capable(__task_cred(parent)->user_ns, CAP_SYS_PTRACE))
 			rc = -EPERM;
+		rcu_read_unlock();
 		break;
 	case YAMA_SCOPE_NO_ATTACH:
 		rc = -EPERM;
