author     James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-06-10 14:47:26 -0400
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-06-10 14:47:26 -0400
commit     f0cd91a68acdc9b49d7f6738b514a426da627649 (patch)
tree       8ad73564015794197583b094217ae0a71e71e753 /kernel
parent     60eef25701d25e99c991dd0f4a9f3832a0c3ad3e (diff)
parent     128e6ced247cda88f96fa9f2e4ba8b2c4a681560 (diff)
Merge ../linux-2.6
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c           | 160
-rw-r--r--  kernel/audit.h           |  10
-rw-r--r--  kernel/auditfilter.c     | 289
-rw-r--r--  kernel/auditsc.c         | 269
-rw-r--r--  kernel/cpuset.c          |  25
-rw-r--r--  kernel/exit.c            |   5
-rw-r--r--  kernel/extable.c         |   2
-rw-r--r--  kernel/fork.c            |  11
-rw-r--r--  kernel/hrtimer.c         |  10
-rw-r--r--  kernel/irq/manage.c      |   6
-rw-r--r--  kernel/kprobes.c         |   3
-rw-r--r--  kernel/module.c          |  12
-rw-r--r--  kernel/power/main.c      |   2
-rw-r--r--  kernel/power/pm.c        |  20
-rw-r--r--  kernel/power/snapshot.c  |   9
-rw-r--r--  kernel/profile.c         |   2
-rw-r--r--  kernel/ptrace.c          |  64
-rw-r--r--  kernel/rcupdate.c        |  23
-rw-r--r--  kernel/sched.c           |  64
-rw-r--r--  kernel/signal.c          |   4
-rw-r--r--  kernel/softirq.c         |   4
-rw-r--r--  kernel/softlockup.c      |   4
-rw-r--r--  kernel/timer.c           |  20
-rw-r--r--  kernel/uid16.c           |  59
-rw-r--r--  kernel/workqueue.c       |   2
25 files changed, 736 insertions(+), 343 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index c8ccbd09048f..df57b493e1cb 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -55,6 +55,9 @@
 #include <net/netlink.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
+#include <linux/selinux.h>
+
+#include "audit.h"
 
 /* No auditing will take place until audit_initialized != 0.
  * (Initialization happens after skb_init is called.) */
@@ -227,49 +230,103 @@ void audit_log_lost(const char *message)
 	}
 }
 
-static int audit_set_rate_limit(int limit, uid_t loginuid)
+static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sid)
 {
 	int old = audit_rate_limit;
-	audit_rate_limit = limit;
-	audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-		"audit_rate_limit=%d old=%d by auid=%u",
-		audit_rate_limit, old, loginuid);
+
+	if (sid) {
+		char *ctx = NULL;
+		u32 len;
+		int rc;
+		if ((rc = selinux_ctxid_to_string(sid, &ctx, &len)))
+			return rc;
+		else
+			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+				"audit_rate_limit=%d old=%d by auid=%u subj=%s",
+				limit, old, loginuid, ctx);
+		kfree(ctx);
+	} else
+		audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+			"audit_rate_limit=%d old=%d by auid=%u",
+			limit, old, loginuid);
+	audit_rate_limit = limit;
 	return old;
 }
 
-static int audit_set_backlog_limit(int limit, uid_t loginuid)
+static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sid)
 {
 	int old = audit_backlog_limit;
-	audit_backlog_limit = limit;
-	audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-		"audit_backlog_limit=%d old=%d by auid=%u",
-		audit_backlog_limit, old, loginuid);
+
+	if (sid) {
+		char *ctx = NULL;
+		u32 len;
+		int rc;
+		if ((rc = selinux_ctxid_to_string(sid, &ctx, &len)))
+			return rc;
+		else
+			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+				"audit_backlog_limit=%d old=%d by auid=%u subj=%s",
+				limit, old, loginuid, ctx);
+		kfree(ctx);
+	} else
+		audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+			"audit_backlog_limit=%d old=%d by auid=%u",
+			limit, old, loginuid);
+	audit_backlog_limit = limit;
 	return old;
 }
 
-static int audit_set_enabled(int state, uid_t loginuid)
+static int audit_set_enabled(int state, uid_t loginuid, u32 sid)
 {
 	int old = audit_enabled;
+
 	if (state != 0 && state != 1)
 		return -EINVAL;
-	audit_enabled = state;
-	audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-		"audit_enabled=%d old=%d by auid=%u",
-		audit_enabled, old, loginuid);
+
+	if (sid) {
+		char *ctx = NULL;
+		u32 len;
+		int rc;
+		if ((rc = selinux_ctxid_to_string(sid, &ctx, &len)))
+			return rc;
+		else
+			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+				"audit_enabled=%d old=%d by auid=%u subj=%s",
+				state, old, loginuid, ctx);
+		kfree(ctx);
+	} else
+		audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+			"audit_enabled=%d old=%d by auid=%u",
+			state, old, loginuid);
+	audit_enabled = state;
 	return old;
 }
 
-static int audit_set_failure(int state, uid_t loginuid)
+static int audit_set_failure(int state, uid_t loginuid, u32 sid)
 {
 	int old = audit_failure;
+
 	if (state != AUDIT_FAIL_SILENT
 	    && state != AUDIT_FAIL_PRINTK
 	    && state != AUDIT_FAIL_PANIC)
 		return -EINVAL;
-	audit_failure = state;
-	audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-		"audit_failure=%d old=%d by auid=%u",
-		audit_failure, old, loginuid);
+
+	if (sid) {
+		char *ctx = NULL;
+		u32 len;
+		int rc;
+		if ((rc = selinux_ctxid_to_string(sid, &ctx, &len)))
+			return rc;
+		else
+			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+				"audit_failure=%d old=%d by auid=%u subj=%s",
+				state, old, loginuid, ctx);
+		kfree(ctx);
+	} else
+		audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+			"audit_failure=%d old=%d by auid=%u",
+			state, old, loginuid);
+	audit_failure = state;
 	return old;
 }
 
@@ -387,7 +444,7 @@ static int audit_netlink_ok(kernel_cap_t eff_cap, u16 msg_type)
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-	u32			uid, pid, seq;
+	u32			uid, pid, seq, sid;
 	void			*data;
 	struct audit_status	*status_get, status_set;
 	int			err;
@@ -413,6 +470,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	pid  = NETLINK_CREDS(skb)->pid;
 	uid  = NETLINK_CREDS(skb)->uid;
 	loginuid = NETLINK_CB(skb).loginuid;
+	sid  = NETLINK_CB(skb).sid;
 	seq  = nlh->nlmsg_seq;
 	data = NLMSG_DATA(nlh);
 
@@ -433,25 +491,43 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			return -EINVAL;
 		status_get   = (struct audit_status *)data;
 		if (status_get->mask & AUDIT_STATUS_ENABLED) {
-			err = audit_set_enabled(status_get->enabled, loginuid);
+			err = audit_set_enabled(status_get->enabled,
+						loginuid, sid);
 			if (err < 0) return err;
 		}
 		if (status_get->mask & AUDIT_STATUS_FAILURE) {
-			err = audit_set_failure(status_get->failure, loginuid);
+			err = audit_set_failure(status_get->failure,
						loginuid, sid);
 			if (err < 0) return err;
 		}
 		if (status_get->mask & AUDIT_STATUS_PID) {
 			int old   = audit_pid;
+			if (sid) {
+				char *ctx = NULL;
+				u32 len;
+				int rc;
+				if ((rc = selinux_ctxid_to_string(
+						sid, &ctx, &len)))
+					return rc;
+				else
+					audit_log(NULL, GFP_KERNEL,
+						AUDIT_CONFIG_CHANGE,
+						"audit_pid=%d old=%d by auid=%u subj=%s",
+						status_get->pid, old,
+						loginuid, ctx);
+				kfree(ctx);
+			} else
+				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+					"audit_pid=%d old=%d by auid=%u",
+					status_get->pid, old, loginuid);
 			audit_pid = status_get->pid;
-			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-				"audit_pid=%d old=%d by auid=%u",
-				audit_pid, old, loginuid);
 		}
 		if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
-			audit_set_rate_limit(status_get->rate_limit, loginuid);
+			audit_set_rate_limit(status_get->rate_limit,
					     loginuid, sid);
 		if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
 			audit_set_backlog_limit(status_get->backlog_limit,
-						loginuid);
+						loginuid, sid);
 		break;
 	case AUDIT_USER:
 	case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG:
@@ -465,8 +541,23 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
 		if (ab) {
 			audit_log_format(ab,
-				"user pid=%d uid=%u auid=%u msg='%.1024s'",
-				pid, uid, loginuid, (char *)data);
+				"user pid=%d uid=%u auid=%u",
+				pid, uid, loginuid);
+			if (sid) {
+				char *ctx = NULL;
+				u32 len;
+				if (selinux_ctxid_to_string(
+						sid, &ctx, &len)) {
+					audit_log_format(ab,
+						" ssid=%u", sid);
+					/* Maybe call audit_panic? */
+				} else
+					audit_log_format(ab,
+						" subj=%s", ctx);
+				kfree(ctx);
+			}
+			audit_log_format(ab, " msg='%.1024s'",
+				(char *)data);
 			audit_set_pid(ab, pid);
 			audit_log_end(ab);
 		}
@@ -480,7 +571,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	case AUDIT_LIST:
 		err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
 					   uid, seq, data, nlmsg_len(nlh),
-					   loginuid);
+					   loginuid, sid);
 		break;
 	case AUDIT_ADD_RULE:
 	case AUDIT_DEL_RULE:
@@ -490,7 +581,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	case AUDIT_LIST_RULES:
 		err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
 					   uid, seq, data, nlmsg_len(nlh),
-					   loginuid);
+					   loginuid, sid);
 		break;
 	case AUDIT_SIGNAL_INFO:
 		sig_data.uid = audit_sig_uid;
@@ -564,6 +655,11 @@ static int __init audit_init(void)
 	skb_queue_head_init(&audit_skb_queue);
 	audit_initialized = 1;
 	audit_enabled = audit_default;
+
+	/* Register the callback with selinux.  This callback will be invoked
+	 * when a new policy is loaded. */
+	selinux_audit_set_callback(&selinux_audit_rule_update);
+
 	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
 	return 0;
 }
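
Note: the four audit_set_*() helpers above duplicate the same sid-to-context logging block. A minimal sketch of how that block could be factored into one helper; the helper name is hypothetical and not part of this commit, and it reuses only selinux_ctxid_to_string() and audit_log() exactly as they appear above:

	/* Hypothetical consolidation of the repeated pattern above.
	 * Resolves the sender's SELinux sid to a context string and emits
	 * a single AUDIT_CONFIG_CHANGE record for an integer setting. */
	static int audit_log_config_change_sketch(const char *name, int new,
						  int old, uid_t loginuid,
						  u32 sid)
	{
		if (sid) {
			char *ctx = NULL;
			u32 len;
			int rc = selinux_ctxid_to_string(sid, &ctx, &len);
			if (rc)
				return rc;
			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
				  "%s=%d old=%d by auid=%u subj=%s",
				  name, new, old, loginuid, ctx);
			kfree(ctx);
		} else
			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
				  "%s=%d old=%d by auid=%u",
				  name, new, old, loginuid);
		return 0;
	}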
diff --git a/kernel/audit.h b/kernel/audit.h
index bc5392076e2b..6f733920fd32 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -54,9 +54,11 @@ enum audit_state {
 
 /* Rule lists */
 struct audit_field {
 	u32				type;
 	u32				val;
 	u32				op;
+	char				*se_str;
+	struct selinux_audit_rule	*se_rule;
 };
 
 struct audit_krule {
@@ -86,3 +88,5 @@ extern void audit_send_reply(int pid, int seq, int type,
 extern void audit_log_lost(const char *message);
 extern void audit_panic(const char *message);
 extern struct mutex audit_netlink_mutex;
+
+extern int selinux_audit_rule_update(void);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index d3a8539f3a83..7c134906d689 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -23,6 +23,7 @@
 #include <linux/audit.h>
 #include <linux/kthread.h>
 #include <linux/netlink.h>
+#include <linux/selinux.h>
 #include "audit.h"
 
 /* There are three lists of rules -- one to search at task creation
@@ -42,6 +43,13 @@ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = {
 
 static inline void audit_free_rule(struct audit_entry *e)
 {
+	int i;
+	if (e->rule.fields)
+		for (i = 0; i < e->rule.field_count; i++) {
+			struct audit_field *f = &e->rule.fields[i];
+			kfree(f->se_str);
+			selinux_audit_rule_free(f->se_rule);
+		}
 	kfree(e->rule.fields);
 	kfree(e);
 }
@@ -52,9 +60,29 @@ static inline void audit_free_rule_rcu(struct rcu_head *head)
 	audit_free_rule(e);
 }
 
+/* Initialize an audit filterlist entry. */
+static inline struct audit_entry *audit_init_entry(u32 field_count)
+{
+	struct audit_entry *entry;
+	struct audit_field *fields;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (unlikely(!entry))
+		return NULL;
+
+	fields = kzalloc(sizeof(*fields) * field_count, GFP_KERNEL);
+	if (unlikely(!fields)) {
+		kfree(entry);
+		return NULL;
+	}
+	entry->rule.fields = fields;
+
+	return entry;
+}
+
 /* Unpack a filter field's string representation from user-space
  * buffer. */
-static __attribute__((unused)) char *audit_unpack_string(void **bufp, size_t *remain, size_t len)
+static char *audit_unpack_string(void **bufp, size_t *remain, size_t len)
 {
 	char *str;
 
@@ -84,7 +112,6 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
 {
 	unsigned listnr;
 	struct audit_entry *entry;
-	struct audit_field *fields;
 	int i, err;
 
 	err = -EINVAL;
@@ -108,23 +135,14 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
 		goto exit_err;
 
 	err = -ENOMEM;
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (unlikely(!entry))
-		goto exit_err;
-	fields = kmalloc(sizeof(*fields) * rule->field_count, GFP_KERNEL);
-	if (unlikely(!fields)) {
-		kfree(entry);
+	entry = audit_init_entry(rule->field_count);
+	if (!entry)
 		goto exit_err;
-	}
-
-	memset(&entry->rule, 0, sizeof(struct audit_krule));
-	memset(fields, 0, sizeof(struct audit_field));
 
 	entry->rule.flags = rule->flags & AUDIT_FILTER_PREPEND;
 	entry->rule.listnr = listnr;
 	entry->rule.action = rule->action;
 	entry->rule.field_count = rule->field_count;
-	entry->rule.fields = fields;
 
 	for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
 		entry->rule.mask[i] = rule->mask[i];
@@ -150,15 +168,20 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 	for (i = 0; i < rule->field_count; i++) {
 		struct audit_field *f = &entry->rule.fields[i];
 
-		if (rule->fields[i] & AUDIT_UNUSED_BITS) {
-			err = -EINVAL;
-			goto exit_free;
-		}
-
 		f->op = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS);
 		f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS);
 		f->val = rule->values[i];
 
+		if (f->type & AUDIT_UNUSED_BITS ||
+		    f->type == AUDIT_SE_USER ||
+		    f->type == AUDIT_SE_ROLE ||
+		    f->type == AUDIT_SE_TYPE ||
+		    f->type == AUDIT_SE_SEN ||
+		    f->type == AUDIT_SE_CLR) {
+			err = -EINVAL;
+			goto exit_free;
+		}
+
 		entry->rule.vers_ops = (f->op & AUDIT_OPERATORS) ? 2 : 1;
 
 		/* Support for legacy operators where
@@ -188,8 +211,9 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 	int err = 0;
 	struct audit_entry *entry;
 	void *bufp;
-	/* size_t remain = datasz - sizeof(struct audit_rule_data); */
+	size_t remain = datasz - sizeof(struct audit_rule_data);
 	int i;
+	char *str;
 
 	entry = audit_to_entry_common((struct audit_rule *)data);
 	if (IS_ERR(entry))
@@ -207,10 +231,35 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 
 		f->op = data->fieldflags[i] & AUDIT_OPERATORS;
 		f->type = data->fields[i];
+		f->val = data->values[i];
+		f->se_str = NULL;
+		f->se_rule = NULL;
 		switch(f->type) {
-		/* call type-specific conversion routines here */
-		default:
-			f->val = data->values[i];
+		case AUDIT_SE_USER:
+		case AUDIT_SE_ROLE:
+		case AUDIT_SE_TYPE:
+		case AUDIT_SE_SEN:
+		case AUDIT_SE_CLR:
+			str = audit_unpack_string(&bufp, &remain, f->val);
+			if (IS_ERR(str))
+				goto exit_free;
+			entry->rule.buflen += f->val;
+
+			err = selinux_audit_rule_init(f->type, f->op, str,
+						      &f->se_rule);
+			/* Keep currently invalid fields around in case they
+			 * become valid after a policy reload. */
+			if (err == -EINVAL) {
+				printk(KERN_WARNING "audit rule for selinux "
+				       "\'%s\' is invalid\n", str);
+				err = 0;
+			}
+			if (err) {
+				kfree(str);
+				goto exit_free;
+			} else
+				f->se_str = str;
+			break;
 		}
 	}
 
@@ -286,7 +335,14 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 		data->fields[i] = f->type;
 		data->fieldflags[i] = f->op;
 		switch(f->type) {
-		/* call type-specific conversion routines here */
+		case AUDIT_SE_USER:
+		case AUDIT_SE_ROLE:
+		case AUDIT_SE_TYPE:
+		case AUDIT_SE_SEN:
+		case AUDIT_SE_CLR:
+			data->buflen += data->values[i] =
+				audit_pack_string(&bufp, f->se_str);
+			break;
 		default:
 			data->values[i] = f->val;
 		}
@@ -314,7 +370,14 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
 			return 1;
 
 		switch(a->fields[i].type) {
-		/* call type-specific comparison routines here */
+		case AUDIT_SE_USER:
+		case AUDIT_SE_ROLE:
+		case AUDIT_SE_TYPE:
+		case AUDIT_SE_SEN:
+		case AUDIT_SE_CLR:
+			if (strcmp(a->fields[i].se_str, b->fields[i].se_str))
+				return 1;
+			break;
 		default:
 			if (a->fields[i].val != b->fields[i].val)
 				return 1;
@@ -328,6 +391,81 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
 	return 0;
 }
 
+/* Duplicate selinux field information.  The se_rule is opaque, so must be
+ * re-initialized. */
+static inline int audit_dupe_selinux_field(struct audit_field *df,
+					   struct audit_field *sf)
+{
+	int ret = 0;
+	char *se_str;
+
+	/* our own copy of se_str */
+	se_str = kstrdup(sf->se_str, GFP_KERNEL);
+	if (unlikely(IS_ERR(se_str)))
+		return -ENOMEM;
+	df->se_str = se_str;
+
+	/* our own (refreshed) copy of se_rule */
+	ret = selinux_audit_rule_init(df->type, df->op, df->se_str,
+				      &df->se_rule);
+	/* Keep currently invalid fields around in case they
+	 * become valid after a policy reload. */
+	if (ret == -EINVAL) {
+		printk(KERN_WARNING "audit rule for selinux \'%s\' is "
+		       "invalid\n", df->se_str);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* Duplicate an audit rule.  This will be a deep copy with the exception
+ * of the watch - that pointer is carried over.  The selinux specific fields
+ * will be updated in the copy.  The point is to be able to replace the old
+ * rule with the new rule in the filterlist, then free the old rule. */
+static struct audit_entry *audit_dupe_rule(struct audit_krule *old)
+{
+	u32 fcount = old->field_count;
+	struct audit_entry *entry;
+	struct audit_krule *new;
+	int i, err = 0;
+
+	entry = audit_init_entry(fcount);
+	if (unlikely(!entry))
+		return ERR_PTR(-ENOMEM);
+
+	new = &entry->rule;
+	new->vers_ops = old->vers_ops;
+	new->flags = old->flags;
+	new->listnr = old->listnr;
+	new->action = old->action;
+	for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
+		new->mask[i] = old->mask[i];
+	new->buflen = old->buflen;
+	new->field_count = old->field_count;
+	memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount);
+
+	/* deep copy this information, updating the se_rule fields, because
+	 * the originals will all be freed when the old rule is freed. */
+	for (i = 0; i < fcount; i++) {
+		switch (new->fields[i].type) {
+		case AUDIT_SE_USER:
+		case AUDIT_SE_ROLE:
+		case AUDIT_SE_TYPE:
+		case AUDIT_SE_SEN:
+		case AUDIT_SE_CLR:
+			err = audit_dupe_selinux_field(&new->fields[i],
+						       &old->fields[i]);
+		}
+		if (err) {
+			audit_free_rule(entry);
+			return ERR_PTR(err);
+		}
+	}
+
+	return entry;
+}
+
 /* Add rule to given filterlist if not a duplicate.  Protected by
  * audit_netlink_mutex. */
 static inline int audit_add_rule(struct audit_entry *entry,
@@ -448,9 +586,10 @@ static int audit_list_rules(void *_dest)
 * @data: payload data
 * @datasz: size of payload data
 * @loginuid: loginuid of sender
+ * @sid: SE Linux Security ID of sender
 */
 int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
-			 size_t datasz, uid_t loginuid)
+			 size_t datasz, uid_t loginuid, u32 sid)
 {
 	struct task_struct *tsk;
 	int *dest;
@@ -493,9 +632,23 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
 
 		err = audit_add_rule(entry,
 				     &audit_filter_list[entry->rule.listnr]);
-		audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-			"auid=%u add rule to list=%d res=%d\n",
-			loginuid, entry->rule.listnr, !err);
+		if (sid) {
+			char *ctx = NULL;
+			u32 len;
+			if (selinux_ctxid_to_string(sid, &ctx, &len)) {
+				/* Maybe call audit_panic? */
+				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+					"auid=%u ssid=%u add rule to list=%d res=%d",
+					loginuid, sid, entry->rule.listnr, !err);
+			} else
+				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+					"auid=%u subj=%s add rule to list=%d res=%d",
+					loginuid, ctx, entry->rule.listnr, !err);
+			kfree(ctx);
+		} else
+			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+				"auid=%u add rule to list=%d res=%d",
+				loginuid, entry->rule.listnr, !err);
 
 		if (err)
 			audit_free_rule(entry);
@@ -511,9 +664,24 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
 
 		err = audit_del_rule(entry,
 				     &audit_filter_list[entry->rule.listnr]);
-		audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-			"auid=%u remove rule from list=%d res=%d\n",
-			loginuid, entry->rule.listnr, !err);
+
+		if (sid) {
+			char *ctx = NULL;
+			u32 len;
+			if (selinux_ctxid_to_string(sid, &ctx, &len)) {
+				/* Maybe call audit_panic? */
+				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+					"auid=%u ssid=%u remove rule from list=%d res=%d",
+					loginuid, sid, entry->rule.listnr, !err);
+			} else
+				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+					"auid=%u subj=%s remove rule from list=%d res=%d",
+					loginuid, ctx, entry->rule.listnr, !err);
+			kfree(ctx);
+		} else
+			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
+				"auid=%u remove rule from list=%d res=%d",
+				loginuid, entry->rule.listnr, !err);
 
 		audit_free_rule(entry);
 		break;
@@ -628,3 +796,62 @@ unlock_and_return:
 	rcu_read_unlock();
 	return result;
 }
+
+/* Check to see if the rule contains any selinux fields.  Returns 1 if there
+   are selinux fields specified in the rule, 0 otherwise. */
+static inline int audit_rule_has_selinux(struct audit_krule *rule)
+{
+	int i;
+
+	for (i = 0; i < rule->field_count; i++) {
+		struct audit_field *f = &rule->fields[i];
+		switch (f->type) {
+		case AUDIT_SE_USER:
+		case AUDIT_SE_ROLE:
+		case AUDIT_SE_TYPE:
+		case AUDIT_SE_SEN:
+		case AUDIT_SE_CLR:
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/* This function will re-initialize the se_rule field of all applicable rules.
+ * It will traverse the filter lists searching for rules that contain selinux
+ * specific filter fields.  When such a rule is found, it is copied, the
+ * selinux field is re-initialized, and the old rule is replaced with the
+ * updated rule. */
+int selinux_audit_rule_update(void)
+{
+	struct audit_entry *entry, *n, *nentry;
+	int i, err = 0;
+
+	/* audit_netlink_mutex synchronizes the writers */
+	mutex_lock(&audit_netlink_mutex);
+
+	for (i = 0; i < AUDIT_NR_FILTERS; i++) {
+		list_for_each_entry_safe(entry, n, &audit_filter_list[i], list) {
+			if (!audit_rule_has_selinux(&entry->rule))
+				continue;
+
+			nentry = audit_dupe_rule(&entry->rule);
+			if (unlikely(IS_ERR(nentry))) {
+				/* save the first error encountered for the
+				 * return value */
+				if (!err)
+					err = PTR_ERR(nentry);
+				audit_panic("error updating selinux filters");
+				list_del_rcu(&entry->list);
+			} else {
+				list_replace_rcu(&entry->list, &nentry->list);
+			}
+			call_rcu(&entry->rcu, audit_free_rule_rcu);
+		}
+	}
+
+	mutex_unlock(&audit_netlink_mutex);
+
+	return err;
+}
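
The selinux_audit_rule_update() function above is an instance of the RCU copy-and-replace idiom: readers walk the filter lists under rcu_read_lock() while the writer swaps in a fresh copy with list_replace_rcu() and defers the free with call_rcu(). A generic sketch of the idiom, with a hypothetical struct item standing in for struct audit_entry; the writer is assumed to hold the list's update-side mutex, as audit_netlink_mutex is held above:

	struct item {
		struct list_head list;
		struct rcu_head  rcu;
		int              payload;
	};

	static void item_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct item, rcu));
	}

	/* Replace 'old' with a refreshed copy; readers never see the list
	 * in an inconsistent state, and the old node is freed only after
	 * all current RCU readers have finished. */
	static int item_refresh(struct item *old)
	{
		struct item *new = kmalloc(sizeof(*new), GFP_KERNEL);

		if (!new)
			return -ENOMEM;
		new->payload = old->payload + 1;   /* recompute stale state */
		list_replace_rcu(&old->list, &new->list);
		call_rcu(&old->rcu, item_free_rcu);
		return 0;
	}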
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7f160df21a23..1c03a4ed1b27 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -58,6 +58,7 @@
 #include <linux/security.h>
 #include <linux/list.h>
 #include <linux/tty.h>
+#include <linux/selinux.h>
 
 #include "audit.h"
 
@@ -89,7 +90,7 @@ struct audit_names {
 	uid_t		uid;
 	gid_t		gid;
 	dev_t		rdev;
-	char		*ctx;
+	u32		osid;
 };
 
 struct audit_aux_data {
@@ -106,7 +107,7 @@ struct audit_aux_data_ipcctl {
 	uid_t			uid;
 	gid_t			gid;
 	mode_t			mode;
-	char			*ctx;
+	u32			osid;
 };
 
 struct audit_aux_data_socketcall {
@@ -167,7 +168,8 @@ static int audit_filter_rules(struct task_struct *tsk,
 			      struct audit_context *ctx,
 			      enum audit_state *state)
 {
-	int i, j;
+	int i, j, need_sid = 1;
+	u32 sid;
 
 	for (i = 0; i < rule->field_count; i++) {
 		struct audit_field *f = &rule->fields[i];
@@ -257,6 +259,27 @@ static int audit_filter_rules(struct task_struct *tsk,
 			if (ctx)
 				result = audit_comparator(ctx->loginuid, f->op, f->val);
 			break;
+		case AUDIT_SE_USER:
+		case AUDIT_SE_ROLE:
+		case AUDIT_SE_TYPE:
+		case AUDIT_SE_SEN:
+		case AUDIT_SE_CLR:
+			/* NOTE: this may return negative values indicating
+			   a temporary error.  We simply treat this as a
+			   match for now to avoid losing information that
+			   may be wanted.  An error message will also be
+			   logged upon error */
+			if (f->se_rule) {
+				if (need_sid) {
+					selinux_task_ctxid(tsk, &sid);
+					need_sid = 0;
+				}
+				result = selinux_audit_rule_match(sid, f->type,
+								  f->op,
+								  f->se_rule,
+								  ctx);
+			}
+			break;
 		case AUDIT_ARG0:
 		case AUDIT_ARG1:
 		case AUDIT_ARG2:
@@ -329,7 +352,6 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
 	return AUDIT_BUILD_CONTEXT;
 }
 
-/* This should be called with task_lock() held. */
 static inline struct audit_context *audit_get_context(struct task_struct *tsk,
 						      int return_valid,
 						      int return_code)
@@ -391,9 +413,6 @@ static inline void audit_free_names(struct audit_context *context)
 #endif
 
 	for (i = 0; i < context->name_count; i++) {
-		char *p = context->names[i].ctx;
-		context->names[i].ctx = NULL;
-		kfree(p);
 		if (context->names[i].name)
 			__putname(context->names[i].name);
 	}
@@ -416,11 +435,6 @@ static inline void audit_free_aux(struct audit_context *context)
 			dput(axi->dentry);
 			mntput(axi->mnt);
 		}
-		if ( aux->type == AUDIT_IPC ) {
-			struct audit_aux_data_ipcctl *axi = (void *)aux;
-			if (axi->ctx)
-				kfree(axi->ctx);
-		}
 
 		context->aux = aux->next;
 		kfree(aux);
@@ -506,7 +520,7 @@ static inline void audit_free_context(struct audit_context *context)
 		printk(KERN_ERR "audit: freed %d contexts\n", count);
 }
 
-static void audit_log_task_context(struct audit_buffer *ab, gfp_t gfp_mask)
+static void audit_log_task_context(struct audit_buffer *ab)
 {
 	char *ctx = NULL;
 	ssize_t len = 0;
@@ -518,7 +532,7 @@ static void audit_log_task_context(struct audit_buffer *ab, gfp_t gfp_mask)
 		return;
 	}
 
-	ctx = kmalloc(len, gfp_mask);
+	ctx = kmalloc(len, GFP_KERNEL);
 	if (!ctx)
 		goto error_path;
 
@@ -536,47 +550,46 @@ error_path:
 	return;
 }
 
-static void audit_log_task_info(struct audit_buffer *ab, gfp_t gfp_mask)
+static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
 {
-	char name[sizeof(current->comm)];
-	struct mm_struct *mm = current->mm;
+	char name[sizeof(tsk->comm)];
+	struct mm_struct *mm = tsk->mm;
 	struct vm_area_struct *vma;
 
-	get_task_comm(name, current);
+	/* tsk == current */
+
+	get_task_comm(name, tsk);
 	audit_log_format(ab, " comm=");
 	audit_log_untrustedstring(ab, name);
 
-	if (!mm)
-		return;
-
-	/*
-	 * this is brittle; all callers that pass GFP_ATOMIC will have
-	 * NULL current->mm and we won't get here.
-	 */
-	down_read(&mm->mmap_sem);
-	vma = mm->mmap;
-	while (vma) {
-		if ((vma->vm_flags & VM_EXECUTABLE) &&
-		    vma->vm_file) {
-			audit_log_d_path(ab, "exe=",
-					 vma->vm_file->f_dentry,
-					 vma->vm_file->f_vfsmnt);
-			break;
+	if (mm) {
+		down_read(&mm->mmap_sem);
+		vma = mm->mmap;
+		while (vma) {
+			if ((vma->vm_flags & VM_EXECUTABLE) &&
+			    vma->vm_file) {
+				audit_log_d_path(ab, "exe=",
						 vma->vm_file->f_dentry,
						 vma->vm_file->f_vfsmnt);
+				break;
+			}
+			vma = vma->vm_next;
 		}
-		vma = vma->vm_next;
+		up_read(&mm->mmap_sem);
 	}
-	up_read(&mm->mmap_sem);
-	audit_log_task_context(ab, gfp_mask);
+	audit_log_task_context(ab);
 }
 
-static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
+static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
-	int i;
+	int i, call_panic = 0;
 	struct audit_buffer *ab;
 	struct audit_aux_data *aux;
 	const char *tty;
 
-	ab = audit_log_start(context, gfp_mask, AUDIT_SYSCALL);
+	/* tsk == current */
+
+	ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
 	if (!ab)
 		return;		/* audit_panic has been called */
@@ -587,8 +600,8 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 	audit_log_format(ab, " success=%s exit=%ld",
 			 (context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
 			 context->return_code);
-	if (current->signal->tty && current->signal->tty->name)
-		tty = current->signal->tty->name;
+	if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
+		tty = tsk->signal->tty->name;
 	else
 		tty = "(none)";
 	audit_log_format(ab,
@@ -607,12 +620,12 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 		  context->gid,
 		  context->euid, context->suid, context->fsuid,
 		  context->egid, context->sgid, context->fsgid, tty);
-	audit_log_task_info(ab, gfp_mask);
+	audit_log_task_info(ab, tsk);
 	audit_log_end(ab);
 
 	for (aux = context->aux; aux; aux = aux->next) {
 
-		ab = audit_log_start(context, gfp_mask, aux->type);
+		ab = audit_log_start(context, GFP_KERNEL, aux->type);
 		if (!ab)
 			continue; /* audit_panic has been called */
 
@@ -620,8 +633,39 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 		case AUDIT_IPC: {
 			struct audit_aux_data_ipcctl *axi = (void *)aux;
 			audit_log_format(ab,
-				 " qbytes=%lx iuid=%u igid=%u mode=%x obj=%s",
-				 axi->qbytes, axi->uid, axi->gid, axi->mode, axi->ctx);
+				 " qbytes=%lx iuid=%u igid=%u mode=%x",
+				 axi->qbytes, axi->uid, axi->gid, axi->mode);
+			if (axi->osid != 0) {
+				char *ctx = NULL;
+				u32 len;
+				if (selinux_ctxid_to_string(
+						axi->osid, &ctx, &len)) {
+					audit_log_format(ab, " osid=%u",
+							axi->osid);
+					call_panic = 1;
+				} else
+					audit_log_format(ab, " obj=%s", ctx);
+				kfree(ctx);
+			}
+			break; }
+
+		case AUDIT_IPC_SET_PERM: {
+			struct audit_aux_data_ipcctl *axi = (void *)aux;
+			audit_log_format(ab,
+				" new qbytes=%lx new iuid=%u new igid=%u new mode=%x",
+				axi->qbytes, axi->uid, axi->gid, axi->mode);
+			if (axi->osid != 0) {
+				char *ctx = NULL;
+				u32 len;
+				if (selinux_ctxid_to_string(
+						axi->osid, &ctx, &len)) {
+					audit_log_format(ab, " osid=%u",
+							axi->osid);
+					call_panic = 1;
+				} else
+					audit_log_format(ab, " obj=%s", ctx);
+				kfree(ctx);
+			}
 			break; }
 
 		case AUDIT_SOCKETCALL: {
@@ -649,7 +693,7 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 	}
 
 	if (context->pwd && context->pwdmnt) {
-		ab = audit_log_start(context, gfp_mask, AUDIT_CWD);
+		ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD);
 		if (ab) {
 			audit_log_d_path(ab, "cwd=", context->pwd, context->pwdmnt);
 			audit_log_end(ab);
@@ -659,7 +703,7 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 		unsigned long ino  = context->names[i].ino;
 		unsigned long pino = context->names[i].pino;
 
-		ab = audit_log_start(context, gfp_mask, AUDIT_PATH);
+		ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
 		if (!ab)
 			continue; /* audit_panic has been called */
 
@@ -685,32 +729,35 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 			  context->names[i].gid,
 			  MAJOR(context->names[i].rdev),
 			  MINOR(context->names[i].rdev));
-		if (context->names[i].ctx) {
-			audit_log_format(ab, " obj=%s",
-					 context->names[i].ctx);
+		if (context->names[i].osid != 0) {
+			char *ctx = NULL;
+			u32 len;
+			if (selinux_ctxid_to_string(
+				context->names[i].osid, &ctx, &len)) {
+				audit_log_format(ab, " osid=%u",
						context->names[i].osid);
+				call_panic = 2;
+			} else
+				audit_log_format(ab, " obj=%s", ctx);
+			kfree(ctx);
 		}
 
 		audit_log_end(ab);
 	}
+	if (call_panic)
+		audit_panic("error converting sid to string");
 }
 
 /**
 * audit_free - free a per-task audit context
 * @tsk: task whose audit context block to free
 *
- * Called from copy_process and __put_task_struct.
+ * Called from copy_process and do_exit
 */
 void audit_free(struct task_struct *tsk)
 {
 	struct audit_context *context;
 
-	/*
-	 * No need to lock the task - when we execute audit_free()
-	 * then the task has no external references anymore, and
-	 * we are tearing it down. (The locking also confuses
-	 * DEBUG_LOCKDEP - this freeing may occur in softirq
-	 * contexts as well, via RCU.)
-	 */
 	context = audit_get_context(tsk, 0, 0);
 	if (likely(!context))
 		return;
@@ -719,8 +766,9 @@ void audit_free(struct task_struct *tsk)
 	 * function (e.g., exit_group), then free context block.
 	 * We use GFP_ATOMIC here because we might be doing this
 	 * in the context of the idle thread */
+	/* that can happen only if we are called from do_exit() */
 	if (context->in_syscall && context->auditable)
-		audit_log_exit(context, GFP_ATOMIC);
+		audit_log_exit(context, tsk);
 
 	audit_free_context(context);
 }
@@ -743,10 +791,11 @@ void audit_free(struct task_struct *tsk)
 * will only be written if another part of the kernel requests that it
 * be written).
 */
-void audit_syscall_entry(struct task_struct *tsk, int arch, int major,
+void audit_syscall_entry(int arch, int major,
			 unsigned long a1, unsigned long a2,
			 unsigned long a3, unsigned long a4)
 {
+	struct task_struct *tsk = current;
 	struct audit_context *context = tsk->audit_context;
 	enum audit_state     state;
 
@@ -824,22 +873,18 @@ void audit_syscall_entry(struct task_struct *tsk, int arch, int major,
 * message), then write out the syscall information.  In all cases,
 * free the names stored from getname().
 */
-void audit_syscall_exit(struct task_struct *tsk, int valid, long return_code)
+void audit_syscall_exit(int valid, long return_code)
 {
+	struct task_struct *tsk = current;
 	struct audit_context *context;
 
-	get_task_struct(tsk);
-	task_lock(tsk);
 	context = audit_get_context(tsk, valid, return_code);
-	task_unlock(tsk);
 
-	/* Not having a context here is ok, since the parent may have
-	 * called __put_task_struct. */
 	if (likely(!context))
-		goto out;
+		return;
 
 	if (context->in_syscall && context->auditable)
-		audit_log_exit(context, GFP_KERNEL);
+		audit_log_exit(context, tsk);
 
 	context->in_syscall = 0;
 	context->auditable = 0;
@@ -854,8 +899,6 @@ void audit_syscall_exit(struct task_struct *tsk, int valid, long return_code)
 		audit_free_aux(context);
 		tsk->audit_context = context;
 	}
- out:
-	put_task_struct(tsk);
 }
 
 /**
@@ -936,40 +979,11 @@ void audit_putname(const char *name)
 #endif
 }
 
-void audit_inode_context(int idx, const struct inode *inode)
+static void audit_inode_context(int idx, const struct inode *inode)
 {
 	struct audit_context *context = current->audit_context;
-	const char *suffix = security_inode_xattr_getsuffix();
-	char *ctx = NULL;
-	int len = 0;
-
-	if (!suffix)
-		goto ret;
-
-	len = security_inode_getsecurity(inode, suffix, NULL, 0, 0);
-	if (len == -EOPNOTSUPP)
-		goto ret;
-	if (len < 0)
-		goto error_path;
-
-	ctx = kmalloc(len, GFP_KERNEL);
-	if (!ctx)
-		goto error_path;
 
-	len = security_inode_getsecurity(inode, suffix, ctx, len, 0);
-	if (len < 0)
-		goto error_path;
-
-	kfree(context->names[idx].ctx);
-	context->names[idx].ctx = ctx;
-	goto ret;
-
-error_path:
-	if (ctx)
-		kfree(ctx);
-	audit_panic("error in audit_inode_context");
-ret:
-	return;
+	selinux_get_inode_sid(inode, &context->names[idx].osid);
 }
 
 
@@ -1155,40 +1169,37 @@ uid_t audit_get_loginuid(struct audit_context *ctx)
 	return ctx ? ctx->loginuid : -1;
 }
 
-static char *audit_ipc_context(struct kern_ipc_perm *ipcp)
+/**
+ * audit_ipc_obj - record audit data for ipc object
+ * @ipcp: ipc permissions
+ *
+ * Returns 0 for success or NULL context or < 0 on error.
+ */
+int audit_ipc_obj(struct kern_ipc_perm *ipcp)
 {
+	struct audit_aux_data_ipcctl *ax;
 	struct audit_context *context = current->audit_context;
-	char *ctx = NULL;
-	int len = 0;
 
 	if (likely(!context))
-		return NULL;
-
-	len = security_ipc_getsecurity(ipcp, NULL, 0);
-	if (len == -EOPNOTSUPP)
-		goto ret;
-	if (len < 0)
-		goto error_path;
-
-	ctx = kmalloc(len, GFP_ATOMIC);
-	if (!ctx)
-		goto error_path;
+		return 0;
 
-	len = security_ipc_getsecurity(ipcp, ctx, len);
-	if (len < 0)
-		goto error_path;
+	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
+	if (!ax)
+		return -ENOMEM;
 
-	return ctx;
+	ax->uid = ipcp->uid;
+	ax->gid = ipcp->gid;
+	ax->mode = ipcp->mode;
+	selinux_get_ipc_sid(ipcp, &ax->osid);
 
-error_path:
-	kfree(ctx);
-	audit_panic("error in audit_ipc_context");
-ret:
-	return NULL;
+	ax->d.type = AUDIT_IPC;
+	ax->d.next = context->aux;
+	context->aux = (void *)ax;
+	return 0;
 }
 
 /**
- * audit_ipc_perms - record audit data for ipc
+ * audit_ipc_set_perm - record audit data for new ipc permissions
 * @qbytes: msgq bytes
 * @uid: msgq user id
 * @gid: msgq group id
@@ -1196,7 +1207,7 @@ ret:
 *
 * Returns 0 for success or NULL context or < 0 on error.
 */
-int audit_ipc_perms(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode, struct kern_ipc_perm *ipcp)
+int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode, struct kern_ipc_perm *ipcp)
 {
 	struct audit_aux_data_ipcctl *ax;
 	struct audit_context *context = current->audit_context;
@@ -1212,9 +1223,9 @@ int audit_ipc_perms(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode, str
 	ax->uid = uid;
 	ax->gid = gid;
 	ax->mode = mode;
-	ax->ctx = audit_ipc_context(ipcp);
+	selinux_get_ipc_sid(ipcp, &ax->osid);
 
-	ax->d.type = AUDIT_IPC;
+	ax->d.type = AUDIT_IPC_SET_PERM;
 	ax->d.next = context->aux;
 	context->aux = (void *)ax;
 	return 0;
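
audit_ipc_obj() and audit_ipc_set_perm() above only record a u32 sid at syscall time; the conversion to a printable context string is deferred to audit_log_exit(), which is why the aux records now store osid rather than a kmalloc'd string (and why the old kfree paths could be deleted). A sketch of a hypothetical call site; the real callers live in ipc/, and this function name is illustrative only:

	/* Record the object's old state, then the requested new state,
	 * before an IPC permission change is applied. */
	static int sample_ipc_ctl(struct kern_ipc_perm *ipcp,
				  unsigned long qbytes, uid_t uid,
				  gid_t gid, mode_t mode)
	{
		int err = audit_ipc_obj(ipcp);       /* emits AUDIT_IPC */

		if (err)
			return err;
		/* emits AUDIT_IPC_SET_PERM */
		return audit_ipc_set_perm(qbytes, uid, gid, mode, ipcp);
	}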
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 72248d1b9e3f..ab81fdd4572b 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2231,19 +2231,25 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
 * short of memory, might require taking the callback_mutex mutex.
 *
- * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
- * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
- * hardwall cpusets - no allocation on a node outside the cpuset is
- * allowed (unless in interrupt, of course).
+ * The first call here from mm/page_alloc:get_page_from_freelist()
+ * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, so
+ * no allocation on a node outside the cpuset is allowed (unless in
+ * interrupt, of course).
 *
- * The second loop doesn't even call here for GFP_ATOMIC requests
- * (if the __alloc_pages() local variable 'wait' is set).  That check
- * and the checks below have the combined affect in the second loop of
- * the __alloc_pages() routine that:
+ * The second pass through get_page_from_freelist() doesn't even call
+ * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
+ * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
+ * in alloc_flags.  That logic and the checks below have the combined
+ * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
 *	GFP_USER     - only nodes in current tasks mems allowed ok.
+ *
+ * Rule:
+ *    Don't call cpuset_zone_allowed() if you can't sleep, unless you
+ *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
+ *    the code that might scan up ancestor cpusets and sleep.
 **/
 
 int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
@@ -2255,6 +2261,7 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 	if (in_interrupt())
 		return 1;
 	node = z->zone_pgdat->node_id;
+	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
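
The new might_sleep_if() enforces the documented rule at runtime: a caller that cannot sleep must pass __GFP_HARDWALL so the check stops before the ancestor-cpuset scan that may take a mutex. An illustrative fragment, assuming the usual cpuset_zone_allowed() inline wrapper around __cpuset_zone_allowed() in this kernel's cpuset.h:

	int ok;

	/* Process context, may sleep: the full check is permitted. */
	ok = cpuset_zone_allowed(z, GFP_KERNEL);

	/* Atomic context: must OR in __GFP_HARDWALL, which makes the
	 * check stop before the code that might sleep. */
	ok = cpuset_zone_allowed(z, GFP_ATOMIC | __GFP_HARDWALL);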
diff --git a/kernel/exit.c b/kernel/exit.c
index 1a9787ac6173..e95b93282210 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -35,6 +35,7 @@
 #include <linux/futex.h>
 #include <linux/compat.h>
 #include <linux/pipe_fs_i.h>
+#include <linux/audit.h> /* for audit_free() */
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -56,7 +57,7 @@ static void __unhash_process(struct task_struct *p)
 		detach_pid(p, PIDTYPE_PGID);
 		detach_pid(p, PIDTYPE_SID);
 
-		list_del_init(&p->tasks);
+		list_del_rcu(&p->tasks);
 		__get_cpu_var(process_counts)--;
 	}
 	list_del_rcu(&p->thread_group);
@@ -910,6 +911,8 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(tsk->compat_robust_list))
 		compat_exit_robust_list(tsk);
 #endif
+	if (unlikely(tsk->audit_context))
+		audit_free(tsk);
 	exit_mm(tsk);
 
 	exit_sem(tsk);
diff --git a/kernel/extable.c b/kernel/extable.c
index 7501b531ceed..7fe262855317 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -40,7 +40,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
 	return e;
 }
 
-static int core_kernel_text(unsigned long addr)
+int core_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext &&
 	    addr <= (unsigned long)_etext)
diff --git a/kernel/fork.c b/kernel/fork.c
index 3384eb89cb1c..ac8100e3088a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -114,8 +114,6 @@ void __put_task_struct(struct task_struct *tsk)
 	WARN_ON(atomic_read(&tsk->usage));
 	WARN_ON(tsk == current);
 
-	if (unlikely(tsk->audit_context))
-		audit_free(tsk);
 	security_task_free(tsk);
 	free_uid(tsk->user);
 	put_group_info(tsk->group_info);
@@ -124,12 +122,6 @@ void __put_task_struct(struct task_struct *tsk)
 	free_task(tsk);
 }
 
-void __put_task_struct_cb(struct rcu_head *rhp)
-{
-	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
-	__put_task_struct(tsk);
-}
-
 void __init fork_init(unsigned long mempages)
 {
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -186,6 +178,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	atomic_set(&tsk->usage,2);
 	atomic_set(&tsk->fs_excl, 0);
 	tsk->btrace_seq = 0;
+	tsk->splice_pipe = NULL;
 	return tsk;
 }
 
@@ -1210,7 +1203,7 @@ static task_t *copy_process(unsigned long clone_flags,
 			attach_pid(p, PIDTYPE_PGID, process_group(p));
 			attach_pid(p, PIDTYPE_SID, p->signal->session);
 
-			list_add_tail(&p->tasks, &init_task.tasks);
+			list_add_tail_rcu(&p->tasks, &init_task.tasks);
 			__get_cpu_var(process_counts)++;
 		}
 		attach_pid(p, PIDTYPE_PID, p->pid);
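
Together with the list_del_rcu() change in kernel/exit.c above, the switch to list_add_tail_rcu() makes the init_task.tasks list safe to traverse under RCU rather than the tasklist lock. A sketch of the kind of reader this pairing enables; purely illustrative:

	/* Count processes without taking tasklist_lock. */
	static int count_processes_sketch(void)
	{
		struct task_struct *p;
		int n = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(p, &init_task.tasks, tasks)
			n++;	/* p remains valid inside the read section */
		rcu_read_unlock();
		return n;
	}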
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d2a7296c8251..01fa2ae98a85 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -456,6 +456,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
456 456
457 return ret; 457 return ret;
458} 458}
459EXPORT_SYMBOL_GPL(hrtimer_start);
459 460
460/** 461/**
461 * hrtimer_try_to_cancel - try to deactivate a timer 462 * hrtimer_try_to_cancel - try to deactivate a timer
@@ -484,6 +485,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
484 return ret; 485 return ret;
485 486
486} 487}
488EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
487 489
488/** 490/**
489 * hrtimer_cancel - cancel a timer and wait for the handler to finish. 491 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
@@ -504,6 +506,7 @@ int hrtimer_cancel(struct hrtimer *timer)
504 cpu_relax(); 506 cpu_relax();
505 } 507 }
506} 508}
509EXPORT_SYMBOL_GPL(hrtimer_cancel);
507 510
508/** 511/**
509 * hrtimer_get_remaining - get remaining time for the timer 512 * hrtimer_get_remaining - get remaining time for the timer
@@ -522,6 +525,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
522 525
523 return rem; 526 return rem;
524} 527}
528EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
525 529
526#ifdef CONFIG_NO_IDLE_HZ 530#ifdef CONFIG_NO_IDLE_HZ
527/** 531/**
@@ -580,6 +584,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
580 timer->base = &bases[clock_id]; 584 timer->base = &bases[clock_id];
581 timer->node.rb_parent = HRTIMER_INACTIVE; 585 timer->node.rb_parent = HRTIMER_INACTIVE;
582} 586}
587EXPORT_SYMBOL_GPL(hrtimer_init);
583 588
584/** 589/**
585 * hrtimer_get_res - get the timer resolution for a clock 590 * hrtimer_get_res - get the timer resolution for a clock
@@ -599,6 +604,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
599 604
600 return 0; 605 return 0;
601} 606}
607EXPORT_SYMBOL_GPL(hrtimer_get_res);
602 608
603/* 609/*
604 * Expire the per base hrtimer-queue: 610 * Expire the per base hrtimer-queue:
@@ -836,7 +842,7 @@ static void migrate_hrtimers(int cpu)
836} 842}
837#endif /* CONFIG_HOTPLUG_CPU */ 843#endif /* CONFIG_HOTPLUG_CPU */
838 844
839static int __devinit hrtimer_cpu_notify(struct notifier_block *self, 845static int hrtimer_cpu_notify(struct notifier_block *self,
840 unsigned long action, void *hcpu) 846 unsigned long action, void *hcpu)
841{ 847{
842 long cpu = (long)hcpu; 848 long cpu = (long)hcpu;
@@ -860,7 +866,7 @@ static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
860 return NOTIFY_OK; 866 return NOTIFY_OK;
861} 867}
862 868
863static struct notifier_block __devinitdata hrtimers_nb = { 869static struct notifier_block hrtimers_nb = {
864 .notifier_call = hrtimer_cpu_notify, 870 .notifier_call = hrtimer_cpu_notify,
865}; 871};
866 872
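[note] The hrtimer hunks export the core API to modules (hrtimer_init, hrtimer_start, hrtimer_try_to_cancel, hrtimer_cancel, hrtimer_get_remaining, hrtimer_get_res) and drop __devinit from the CPU notifier. A module-side sketch using the signatures as they stand in this tree; the callback convention and mode names have changed across releases, so treat the details as assumptions:

	static struct hrtimer my_timer;		/* hypothetical timer */

	static int my_timeout(struct hrtimer *t)
	{
		printk(KERN_INFO "hrtimer fired\n");
		return HRTIMER_NORESTART;	/* one-shot */
	}

	static void arm_it(void)
	{
		hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_REL);
		my_timer.function = my_timeout;
		hrtimer_start(&my_timer, ktime_set(1, 0), HRTIMER_REL);	/* ~1s */
	}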
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ac766ad573e8..1279e3499534 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -246,8 +246,10 @@ int setup_irq(unsigned int irq, struct irqaction * new)
246 246
247mismatch: 247mismatch:
248 spin_unlock_irqrestore(&desc->lock, flags); 248 spin_unlock_irqrestore(&desc->lock, flags);
249 printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__); 249 if (!(new->flags & SA_PROBEIRQ)) {
250 dump_stack(); 250 printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
251 dump_stack();
252 }
251 return -EBUSY; 253 return -EBUSY;
252} 254}
253 255
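[note] The setup_irq() change silences the mismatch warning when the incoming action carries SA_PROBEIRQ: IRQ autoprobing can land on lines that are already claimed, so the collision is expected there rather than a driver bug. For reference, the kind of collision that still warns (sketch; handlers and cookie are hypothetical):

	/* exclusive handler already installed on IRQ 5 */
	request_irq(5, first_isr, 0, "first", NULL);
	/* second request without a matching SA_SHIRQ agreement: lands in
	   the mismatch: path above and fails with -EBUSY */
	request_irq(5, second_isr, SA_SHIRQ, "second", &cookie);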
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1156eb0977d0..1fbf466a29aa 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -585,6 +585,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
585 int i; 585 int i;
586 586
587 rp->kp.pre_handler = pre_handler_kretprobe; 587 rp->kp.pre_handler = pre_handler_kretprobe;
588 rp->kp.post_handler = NULL;
589 rp->kp.fault_handler = NULL;
590 rp->kp.break_handler = NULL;
588 591
589 /* Pre-allocate memory for max kretprobe instances */ 592 /* Pre-allocate memory for max kretprobe instances */
590 if (rp->maxactive <= 0) { 593 if (rp->maxactive <= 0) {
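[note] register_kretprobe() now zeroes the post/fault/break handlers of the embedded kprobe. Callers routinely supply a partially initialized struct kretprobe, and any stale garbage left in rp->kp would otherwise be invoked as a handler. A typical registration under that convention (sketch; the handler and target are hypothetical):

	static struct kretprobe rp = {
		.handler   = my_ret_handler,	/* hypothetical return handler */
		.maxactive = 20,
	};

	rp.kp.addr = (kprobe_opcode_t *)my_probed_func;	/* hypothetical target */
	if (register_kretprobe(&rp) < 0)
		printk(KERN_ERR "kretprobe registration failed\n");

With designated initializers the remaining kp fields are zero anyway, but a reused or heap-allocated kretprobe gets the same guarantee from the hunk above.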
diff --git a/kernel/module.c b/kernel/module.c
index d24deb0dbbc9..bbe04862e1b0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -705,14 +705,14 @@ EXPORT_SYMBOL(__symbol_put);
705 705
706void symbol_put_addr(void *addr) 706void symbol_put_addr(void *addr)
707{ 707{
708 unsigned long flags; 708 struct module *modaddr;
709 709
710 spin_lock_irqsave(&modlist_lock, flags); 710 if (core_kernel_text((unsigned long)addr))
711 if (!kernel_text_address((unsigned long)addr)) 711 return;
712 BUG();
713 712
714 module_put(module_text_address((unsigned long)addr)); 713 if (!(modaddr = module_text_address((unsigned long)addr)))
715 spin_unlock_irqrestore(&modlist_lock, flags); 714 BUG();
715 module_put(modaddr);
716} 716}
717EXPORT_SYMBOL_GPL(symbol_put_addr); 717EXPORT_SYMBOL_GPL(symbol_put_addr);
718 718
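[note] symbol_put_addr() is reworked to drop the modlist_lock critical section: a core-kernel-text address is now a silent no-op (built-in symbols hold no refcount), and only a failed module_text_address() lookup is a BUG. The intended pairing, as a sketch (the symbol name is hypothetical; symbol_get() and symbol_put_addr() are the real interfaces):

	void (*fn)(int) = symbol_get(some_exported_func);	/* hypothetical symbol */
	if (fn) {
		fn(42);
		symbol_put_addr(fn);	/* drop the ref taken by symbol_get() */
	}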
diff --git a/kernel/power/main.c b/kernel/power/main.c
index ee371f50ccaa..a6d9ef46009e 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -272,7 +272,7 @@ static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n
272 if (*s && !strncmp(buf, *s, len)) 272 if (*s && !strncmp(buf, *s, len))
273 break; 273 break;
274 } 274 }
275 if (*s) 275 if (state < PM_SUSPEND_MAX && *s)
276 error = enter_state(state); 276 error = enter_state(state);
277 else 277 else
278 error = -EINVAL; 278 error = -EINVAL;
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 0f6908cce1dd..84063ac8fcfc 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -75,25 +75,6 @@ struct pm_dev *pm_register(pm_dev_t type,
75 return dev; 75 return dev;
76} 76}
77 77
78/**
79 * pm_unregister - unregister a device with power management
80 * @dev: device to unregister
81 *
82 * Remove a device from the power management notification lists. The
83 * dev passed must be a handle previously returned by pm_register.
84 */
85
86void pm_unregister(struct pm_dev *dev)
87{
88 if (dev) {
89 mutex_lock(&pm_devs_lock);
90 list_del(&dev->entry);
91 mutex_unlock(&pm_devs_lock);
92
93 kfree(dev);
94 }
95}
96
97static void __pm_unregister(struct pm_dev *dev) 78static void __pm_unregister(struct pm_dev *dev)
98{ 79{
99 if (dev) { 80 if (dev) {
@@ -258,7 +239,6 @@ int pm_send_all(pm_request_t rqst, void *data)
258} 239}
259 240
260EXPORT_SYMBOL(pm_register); 241EXPORT_SYMBOL(pm_register);
261EXPORT_SYMBOL(pm_unregister);
262EXPORT_SYMBOL(pm_unregister_all); 242EXPORT_SYMBOL(pm_unregister_all);
263EXPORT_SYMBOL(pm_send_all); 243EXPORT_SYMBOL(pm_send_all);
264EXPORT_SYMBOL(pm_active); 244EXPORT_SYMBOL(pm_active);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c5863d02c89e..3eeedbb13b78 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -240,14 +240,15 @@ static void copy_data_pages(struct pbe *pblist)
240 * free_pagedir - free pages allocated with alloc_pagedir() 240 * free_pagedir - free pages allocated with alloc_pagedir()
241 */ 241 */
242 242
243static void free_pagedir(struct pbe *pblist) 243static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
244{ 244{
245 struct pbe *pbe; 245 struct pbe *pbe;
246 246
247 while (pblist) { 247 while (pblist) {
248 pbe = (pblist + PB_PAGE_SKIP)->next; 248 pbe = (pblist + PB_PAGE_SKIP)->next;
249 ClearPageNosave(virt_to_page(pblist)); 249 ClearPageNosave(virt_to_page(pblist));
250 ClearPageNosaveFree(virt_to_page(pblist)); 250 if (clear_nosave_free)
251 ClearPageNosaveFree(virt_to_page(pblist));
251 free_page((unsigned long)pblist); 252 free_page((unsigned long)pblist);
252 pblist = pbe; 253 pblist = pbe;
253 } 254 }
@@ -389,7 +390,7 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
389 pbe->next = alloc_image_page(gfp_mask, safe_needed); 390 pbe->next = alloc_image_page(gfp_mask, safe_needed);
390 } 391 }
391 if (!pbe) { /* get_zeroed_page() failed */ 392 if (!pbe) { /* get_zeroed_page() failed */
392 free_pagedir(pblist); 393 free_pagedir(pblist, 1);
393 pblist = NULL; 394 pblist = NULL;
394 } else 395 } else
395 create_pbe_list(pblist, nr_pages); 396 create_pbe_list(pblist, nr_pages);
@@ -736,7 +737,7 @@ static int create_image(struct snapshot_handle *handle)
736 pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1); 737 pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
737 if (pblist) 738 if (pblist)
738 copy_page_backup_list(pblist, p); 739 copy_page_backup_list(pblist, p);
739 free_pagedir(p); 740 free_pagedir(p, 0);
740 if (!pblist) 741 if (!pblist)
741 error = -ENOMEM; 742 error = -ENOMEM;
742 } 743 }
diff --git a/kernel/profile.c b/kernel/profile.c
index 5a730fdb1a2c..68afe121e507 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -299,7 +299,7 @@ out:
299} 299}
300 300
301#ifdef CONFIG_HOTPLUG_CPU 301#ifdef CONFIG_HOTPLUG_CPU
302static int __devinit profile_cpu_callback(struct notifier_block *info, 302static int profile_cpu_callback(struct notifier_block *info,
303 unsigned long action, void *__cpu) 303 unsigned long action, void *__cpu)
304{ 304{
305 int node, cpu = (unsigned long)__cpu; 305 int node, cpu = (unsigned long)__cpu;
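[note] This is one of several hunks in the merge (hrtimer.c, rcupdate.c, sched.c, softirq.c, softlockup.c, timer.c, workqueue.c) dropping __devinit/__devinitdata from CPU-hotplug notifiers: with CPU hotplug the callback can run long after boot, so neither the function nor the notifier_block may sit in a discardable init section. The shape being fixed, as a sketch:

	static int my_cpu_notify(struct notifier_block *nb,
				 unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		if (action == CPU_ONLINE)
			printk(KERN_INFO "cpu %ld came online\n", cpu);
		return NOTIFY_OK;
	}

	/* must stay resident after init: no __devinitdata */
	static struct notifier_block my_cpu_nb = {
		.notifier_call = my_cpu_notify,
	};

	/* registered once at boot with register_cpu_notifier(&my_cpu_nb) */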
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0eeb7e66722c..921c22ad16e4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -56,10 +56,6 @@ void ptrace_untrace(task_t *child)
56 signal_wake_up(child, 1); 56 signal_wake_up(child, 1);
57 } 57 }
58 } 58 }
59 if (child->signal->flags & SIGNAL_GROUP_EXIT) {
60 sigaddset(&child->pending.signal, SIGKILL);
61 signal_wake_up(child, 1);
62 }
63 spin_unlock(&child->sighand->siglock); 59 spin_unlock(&child->sighand->siglock);
64} 60}
65 61
@@ -81,7 +77,8 @@ void __ptrace_unlink(task_t *child)
81 add_parent(child); 77 add_parent(child);
82 } 78 }
83 79
84 ptrace_untrace(child); 80 if (child->state == TASK_TRACED)
81 ptrace_untrace(child);
85} 82}
86 83
87/* 84/*
@@ -151,12 +148,34 @@ int ptrace_may_attach(struct task_struct *task)
151int ptrace_attach(struct task_struct *task) 148int ptrace_attach(struct task_struct *task)
152{ 149{
153 int retval; 150 int retval;
154 task_lock(task); 151
155 retval = -EPERM; 152 retval = -EPERM;
156 if (task->pid <= 1) 153 if (task->pid <= 1)
157 goto bad; 154 goto out;
158 if (task->tgid == current->tgid) 155 if (task->tgid == current->tgid)
159 goto bad; 156 goto out;
157
158repeat:
159 /*
160 * Nasty, nasty.
161 *
162 * We want to hold both the task-lock and the
163 * tasklist_lock for writing at the same time.
164 * But that's against the rules (tasklist_lock
165 * is taken for reading by interrupts on other
166 * cpu's that may have task_lock).
167 */
168 task_lock(task);
169 local_irq_disable();
170 if (!write_trylock(&tasklist_lock)) {
171 local_irq_enable();
172 task_unlock(task);
173 do {
174 cpu_relax();
175 } while (!write_can_lock(&tasklist_lock));
176 goto repeat;
177 }
178
160 /* the same process cannot be attached many times */ 179 /* the same process cannot be attached many times */
161 if (task->ptrace & PT_PTRACED) 180 if (task->ptrace & PT_PTRACED)
162 goto bad; 181 goto bad;
@@ -169,17 +188,15 @@ int ptrace_attach(struct task_struct *task)
169 ? PT_ATTACHED : 0); 188 ? PT_ATTACHED : 0);
170 if (capable(CAP_SYS_PTRACE)) 189 if (capable(CAP_SYS_PTRACE))
171 task->ptrace |= PT_PTRACE_CAP; 190 task->ptrace |= PT_PTRACE_CAP;
172 task_unlock(task);
173 191
174 write_lock_irq(&tasklist_lock);
175 __ptrace_link(task, current); 192 __ptrace_link(task, current);
176 write_unlock_irq(&tasklist_lock);
177 193
178 force_sig_specific(SIGSTOP, task); 194 force_sig_specific(SIGSTOP, task);
179 return 0;
180 195
181bad: 196bad:
197 write_unlock_irq(&tasklist_lock);
182 task_unlock(task); 198 task_unlock(task);
199out:
183 return retval; 200 return retval;
184} 201}
185 202
@@ -420,21 +437,22 @@ int ptrace_request(struct task_struct *child, long request,
420 */ 437 */
421int ptrace_traceme(void) 438int ptrace_traceme(void)
422{ 439{
423 int ret; 440 int ret = -EPERM;
424 441
425 /* 442 /*
426 * Are we already being traced? 443 * Are we already being traced?
427 */ 444 */
428 if (current->ptrace & PT_PTRACED) 445 task_lock(current);
429 return -EPERM; 446 if (!(current->ptrace & PT_PTRACED)) {
430 ret = security_ptrace(current->parent, current); 447 ret = security_ptrace(current->parent, current);
431 if (ret) 448 /*
432 return -EPERM; 449 * Set the ptrace bit in the process ptrace flags.
433 /* 450 */
434 * Set the ptrace bit in the process ptrace flags. 451 if (!ret)
435 */ 452 current->ptrace |= PT_PTRACED;
436 current->ptrace |= PT_PTRACED; 453 }
437 return 0; 454 task_unlock(current);
455 return ret;
438} 456}
439 457
440/** 458/**
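[note] ptrace_attach() now takes task_lock and then write-trylocks tasklist_lock, backing off completely on failure, because other paths take the locks in the opposite order and tasklist_lock is taken for reading from interrupts on other CPUs. The generic shape of that deadlock-avoidance pattern, with hypothetical locks:

	spin_lock(&lock_a);
	while (!spin_trylock(&lock_b)) {
		/* someone holds B and may be spinning on A:
		   drop A, wait, and retry the whole sequence */
		spin_unlock(&lock_a);
		cpu_relax();
		spin_lock(&lock_a);
	}
	/* both held; release in reverse order when done */
	spin_unlock(&lock_b);
	spin_unlock(&lock_a);

The ptrace_traceme() hunk applies the same idea more simply: task_lock(current) alone suffices to make the check-and-set of PT_PTRACED atomic.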
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 13458bbaa1be..2058f88c7bbb 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -479,12 +479,31 @@ static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
479 return 0; 479 return 0;
480} 480}
481 481
482/*
483 * Check to see if there is any immediate RCU-related work to be done
484 * by the current CPU, returning 1 if so. This function is part of the
485 * RCU implementation; it is -not- an exported member of the RCU API.
486 */
482int rcu_pending(int cpu) 487int rcu_pending(int cpu)
483{ 488{
484 return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || 489 return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
485 __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); 490 __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
486} 491}
487 492
493/*
494 * Check to see if any future RCU-related work will need to be done
495 * by the current CPU, even if none need be done immediately, returning
496 * 1 if so. This function is part of the RCU implementation; it is -not-
497 * an exported member of the RCU API.
498 */
499int rcu_needs_cpu(int cpu)
500{
501 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
502 struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
503
504 return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
505}
506
488void rcu_check_callbacks(int cpu, int user) 507void rcu_check_callbacks(int cpu, int user)
489{ 508{
490 if (user || 509 if (user ||
@@ -520,7 +539,7 @@ static void __devinit rcu_online_cpu(int cpu)
520 tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); 539 tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
521} 540}
522 541
523static int __devinit rcu_cpu_notify(struct notifier_block *self, 542static int rcu_cpu_notify(struct notifier_block *self,
524 unsigned long action, void *hcpu) 543 unsigned long action, void *hcpu)
525{ 544{
526 long cpu = (long)hcpu; 545 long cpu = (long)hcpu;
@@ -537,7 +556,7 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self,
537 return NOTIFY_OK; 556 return NOTIFY_OK;
538} 557}
539 558
540static struct notifier_block __devinitdata rcu_nb = { 559static struct notifier_block rcu_nb = {
541 .notifier_call = rcu_cpu_notify, 560 .notifier_call = rcu_cpu_notify,
542}; 561};
543 562
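[note] rcu_needs_cpu() complements rcu_pending(): it also reports callbacks still queued on curlist, i.e. future work that is not yet ready to run. A dynamic-tick caller can consult it before letting a CPU stop its timer tick (sketch; the real caller sits in the timer code behind CONFIG_NO_IDLE_HZ):

	unsigned long next;

	if (rcu_needs_cpu(cpu))
		next = jiffies + 1;		/* keep ticking for RCU */
	else
		next = next_timer_interrupt();	/* sleep until the next timer */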
diff --git a/kernel/sched.c b/kernel/sched.c
index 365f0b90b4de..c13f1bd2df7d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -665,55 +665,13 @@ static int effective_prio(task_t *p)
665} 665}
666 666
667/* 667/*
668 * We place interactive tasks back into the active array, if possible.
669 *
670 * To guarantee that this does not starve expired tasks we ignore the
671 * interactivity of a task if the first expired task had to wait more
672 * than a 'reasonable' amount of time. This deadline timeout is
673 * load-dependent, as the frequency of array switched decreases with
674 * increasing number of running tasks. We also ignore the interactivity
675 * if a better static_prio task has expired, and switch periodically
676 * regardless, to ensure that highly interactive tasks do not starve
677 * the less fortunate for unreasonably long periods.
678 */
679static inline int expired_starving(runqueue_t *rq)
680{
681 int limit;
682
683 /*
684 * Arrays were recently switched, all is well
685 */
686 if (!rq->expired_timestamp)
687 return 0;
688
689 limit = STARVATION_LIMIT * rq->nr_running;
690
691 /*
692 * It's time to switch arrays
693 */
694 if (jiffies - rq->expired_timestamp >= limit)
695 return 1;
696
697 /*
698 * There's a better selection in the expired array
699 */
700 if (rq->curr->static_prio > rq->best_expired_prio)
701 return 1;
702
703 /*
704 * All is well
705 */
706 return 0;
707}
708
709/*
710 * __activate_task - move a task to the runqueue. 668 * __activate_task - move a task to the runqueue.
711 */ 669 */
712static void __activate_task(task_t *p, runqueue_t *rq) 670static void __activate_task(task_t *p, runqueue_t *rq)
713{ 671{
714 prio_array_t *target = rq->active; 672 prio_array_t *target = rq->active;
715 673
716 if (unlikely(batch_task(p) || (expired_starving(rq) && !rt_task(p)))) 674 if (batch_task(p))
717 target = rq->expired; 675 target = rq->expired;
718 enqueue_task(p, target); 676 enqueue_task(p, target);
719 rq->nr_running++; 677 rq->nr_running++;
@@ -2532,6 +2490,22 @@ unsigned long long current_sched_time(const task_t *tsk)
2532} 2490}
2533 2491
2534/* 2492/*
2493 * We place interactive tasks back into the active array, if possible.
2494 *
2495 * To guarantee that this does not starve expired tasks we ignore the
2496 * interactivity of a task if the first expired task had to wait more
2497 * than a 'reasonable' amount of time. This deadline timeout is
2498 * load-dependent, as the frequency of array switched decreases with
2499 * increasing number of running tasks. We also ignore the interactivity
2500 * if a better static_prio task has expired:
2501 */
2502#define EXPIRED_STARVING(rq) \
2503 ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
2504 (jiffies - (rq)->expired_timestamp >= \
2505 STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
2506 ((rq)->curr->static_prio > (rq)->best_expired_prio))
2507
2508/*
2535 * Account user cpu time to a process. 2509 * Account user cpu time to a process.
2536 * @p: the process that the cpu time gets accounted to 2510 * @p: the process that the cpu time gets accounted to
2537 * @hardirq_offset: the offset to subtract from hardirq_count() 2511 * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2666,7 +2640,7 @@ void scheduler_tick(void)
2666 2640
2667 if (!rq->expired_timestamp) 2641 if (!rq->expired_timestamp)
2668 rq->expired_timestamp = jiffies; 2642 rq->expired_timestamp = jiffies;
2669 if (!TASK_INTERACTIVE(p) || expired_starving(rq)) { 2643 if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
2670 enqueue_task(p, rq->expired); 2644 enqueue_task(p, rq->expired);
2671 if (p->static_prio < rq->best_expired_prio) 2645 if (p->static_prio < rq->best_expired_prio)
2672 rq->best_expired_prio = p->static_prio; 2646 rq->best_expired_prio = p->static_prio;
@@ -4814,7 +4788,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
4814/* Register at highest priority so that task migration (migrate_all_tasks) 4788/* Register at highest priority so that task migration (migrate_all_tasks)
4815 * happens before everything else. 4789 * happens before everything else.
4816 */ 4790 */
4817static struct notifier_block __devinitdata migration_notifier = { 4791static struct notifier_block migration_notifier = {
4818 .notifier_call = migration_call, 4792 .notifier_call = migration_call,
4819 .priority = 10 4793 .priority = 10
4820}; 4794};
diff --git a/kernel/signal.c b/kernel/signal.c
index b14f895027c3..e5f8aea78ffe 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1754,9 +1754,9 @@ relock:
1754 /* Let the debugger run. */ 1754 /* Let the debugger run. */
1755 ptrace_stop(signr, signr, info); 1755 ptrace_stop(signr, signr, info);
1756 1756
1757 /* We're back. Did the debugger cancel the sig or group_exit? */ 1757 /* We're back. Did the debugger cancel the sig? */
1758 signr = current->exit_code; 1758 signr = current->exit_code;
1759 if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT) 1759 if (signr == 0)
1760 continue; 1760 continue;
1761 1761
1762 current->exit_code = 0; 1762 current->exit_code = 0;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ec8fed42a86f..336f92d64e2e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
446} 446}
447#endif /* CONFIG_HOTPLUG_CPU */ 447#endif /* CONFIG_HOTPLUG_CPU */
448 448
449static int __devinit cpu_callback(struct notifier_block *nfb, 449static int cpu_callback(struct notifier_block *nfb,
450 unsigned long action, 450 unsigned long action,
451 void *hcpu) 451 void *hcpu)
452{ 452{
@@ -484,7 +484,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
484 return NOTIFY_OK; 484 return NOTIFY_OK;
485} 485}
486 486
487static struct notifier_block __devinitdata cpu_nfb = { 487static struct notifier_block cpu_nfb = {
488 .notifier_call = cpu_callback 488 .notifier_call = cpu_callback
489}; 489};
490 490
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index ced91e1ff564..14c7faf02909 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
104/* 104/*
105 * Create/destroy watchdog threads as CPUs come and go: 105 * Create/destroy watchdog threads as CPUs come and go:
106 */ 106 */
107static int __devinit 107static int
108cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 108cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
109{ 109{
110 int hotcpu = (unsigned long)hcpu; 110 int hotcpu = (unsigned long)hcpu;
@@ -140,7 +140,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
140 return NOTIFY_OK; 140 return NOTIFY_OK;
141} 141}
142 142
143static struct notifier_block __devinitdata cpu_nfb = { 143static struct notifier_block cpu_nfb = {
144 .notifier_call = cpu_callback 144 .notifier_call = cpu_callback
145}; 145};
146 146
diff --git a/kernel/timer.c b/kernel/timer.c
index 883773788836..9e49deed468c 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -541,6 +541,22 @@ found:
541 } 541 }
542 spin_unlock(&base->lock); 542 spin_unlock(&base->lock);
543 543
544 /*
545 * It can happen that other CPUs service timer IRQs and increment
546 * jiffies, but we have not yet got a local timer tick to process
547 * the timer wheels. In that case, the expiry time can be before
548 * jiffies, but since the high-resolution timer here is relative to
549 * jiffies, the default expression when high-resolution timers are
550 * not active,
551 *
552 * time_before(MAX_JIFFY_OFFSET + jiffies, expires)
553 *
554 * would falsely evaluate to true. If that is the case, just
555 * return jiffies so that we can immediately fire the local timer
556 */
557 if (time_before(expires, jiffies))
558 return jiffies;
559
544 if (time_before(hr_expires, expires)) 560 if (time_before(hr_expires, expires))
545 return hr_expires; 561 return hr_expires;
546 562
@@ -1314,7 +1330,7 @@ static void __devinit migrate_timers(int cpu)
1314} 1330}
1315#endif /* CONFIG_HOTPLUG_CPU */ 1331#endif /* CONFIG_HOTPLUG_CPU */
1316 1332
1317static int __devinit timer_cpu_notify(struct notifier_block *self, 1333static int timer_cpu_notify(struct notifier_block *self,
1318 unsigned long action, void *hcpu) 1334 unsigned long action, void *hcpu)
1319{ 1335{
1320 long cpu = (long)hcpu; 1336 long cpu = (long)hcpu;
@@ -1334,7 +1350,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
1334 return NOTIFY_OK; 1350 return NOTIFY_OK;
1335} 1351}
1336 1352
1337static struct notifier_block __devinitdata timers_nb = { 1353static struct notifier_block timers_nb = {
1338 .notifier_call = timer_cpu_notify, 1354 .notifier_call = timer_cpu_notify,
1339}; 1355};
1340 1356
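[note] The next_timer_interrupt() comment block explains the guard it adds; the underlying trick is that time_before() performs a wrap-safe signed-difference comparison, so an expiry that another CPU's timer IRQs have already pushed into the past must be clamped to jiffies before any jiffies-relative offset is applied. A two-line illustration:

	unsigned long expires = jiffies - 1;	/* already in the past */
	if (time_before(expires, jiffies))
		expires = jiffies;		/* fire the local timer now */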
diff --git a/kernel/uid16.c b/kernel/uid16.c
index aa25605027c8..187e2a423878 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -20,43 +20,67 @@
20 20
21asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group) 21asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group)
22{ 22{
23 return sys_chown(filename, low2highuid(user), low2highgid(group)); 23 long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
24 /* avoid REGPARM breakage on x86: */
25 prevent_tail_call(ret);
26 return ret;
24} 27}
25 28
26asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group) 29asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group)
27{ 30{
28 return sys_lchown(filename, low2highuid(user), low2highgid(group)); 31 long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
32 /* avoid REGPARM breakage on x86: */
33 prevent_tail_call(ret);
34 return ret;
29} 35}
30 36
31asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group) 37asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
32{ 38{
33 return sys_fchown(fd, low2highuid(user), low2highgid(group)); 39 long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
40 /* avoid REGPARM breakage on x86: */
41 prevent_tail_call(ret);
42 return ret;
34} 43}
35 44
36asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid) 45asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
37{ 46{
38 return sys_setregid(low2highgid(rgid), low2highgid(egid)); 47 long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
48 /* avoid REGPARM breakage on x86: */
49 prevent_tail_call(ret);
50 return ret;
39} 51}
40 52
41asmlinkage long sys_setgid16(old_gid_t gid) 53asmlinkage long sys_setgid16(old_gid_t gid)
42{ 54{
43 return sys_setgid(low2highgid(gid)); 55 long ret = sys_setgid(low2highgid(gid));
56 /* avoid REGPARM breakage on x86: */
57 prevent_tail_call(ret);
58 return ret;
44} 59}
45 60
46asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid) 61asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
47{ 62{
48 return sys_setreuid(low2highuid(ruid), low2highuid(euid)); 63 long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
64 /* avoid REGPARM breakage on x86: */
65 prevent_tail_call(ret);
66 return ret;
49} 67}
50 68
51asmlinkage long sys_setuid16(old_uid_t uid) 69asmlinkage long sys_setuid16(old_uid_t uid)
52{ 70{
53 return sys_setuid(low2highuid(uid)); 71 long ret = sys_setuid(low2highuid(uid));
72 /* avoid REGPARM breakage on x86: */
73 prevent_tail_call(ret);
74 return ret;
54} 75}
55 76
56asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid) 77asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
57{ 78{
58 return sys_setresuid(low2highuid(ruid), low2highuid(euid), 79 long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
59 low2highuid(suid)); 80 low2highuid(suid));
81 /* avoid REGPARM breakage on x86: */
82 prevent_tail_call(ret);
83 return ret;
60} 84}
61 85
62asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) 86asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid)
@@ -72,8 +96,11 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid,
72 96
73asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid) 97asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
74{ 98{
75 return sys_setresgid(low2highgid(rgid), low2highgid(egid), 99 long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
76 low2highgid(sgid)); 100 low2highgid(sgid));
101 /* avoid REGPARM breakage on x86: */
102 prevent_tail_call(ret);
103 return ret;
77} 104}
78 105
79asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) 106asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid)
@@ -89,12 +116,18 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid,
89 116
90asmlinkage long sys_setfsuid16(old_uid_t uid) 117asmlinkage long sys_setfsuid16(old_uid_t uid)
91{ 118{
92 return sys_setfsuid(low2highuid(uid)); 119 long ret = sys_setfsuid(low2highuid(uid));
120 /* avoid REGPARM breakage on x86: */
121 prevent_tail_call(ret);
122 return ret;
93} 123}
94 124
95asmlinkage long sys_setfsgid16(old_gid_t gid) 125asmlinkage long sys_setfsgid16(old_gid_t gid)
96{ 126{
97 return sys_setfsgid(low2highgid(gid)); 127 long ret = sys_setfsgid(low2highgid(gid));
128 /* avoid REGPARM breakage on x86: */
129 prevent_tail_call(ret);
130 return ret;
98} 131}
99 132
100static int groups16_to_user(old_gid_t __user *grouplist, 133static int groups16_to_user(old_gid_t __user *grouplist,
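[note] Every 16-bit wrapper gains the same three lines because, with x86 REGPARM, gcc would otherwise compile the one-line wrapper as a tail call into the full-width syscall; since asmlinkage arguments live on the stack on i386, the callee would then scribble over the caller's on-stack argument area, which overlaps the register save done at syscall entry. prevent_tail_call() forces the return value through a register constraint so the call cannot sit in tail position. The pattern, distilled (sys_foo16/sys_foo are hypothetical):

	asmlinkage long sys_foo16(old_uid_t uid)
	{
		long ret = sys_foo(low2highuid(uid));
		/* avoid REGPARM breakage on x86: */
		prevent_tail_call(ret);
		return ret;
	}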
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e9e464a90376..880fb415a8f6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -547,7 +547,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
547} 547}
548 548
549/* We're holding the cpucontrol mutex here */ 549/* We're holding the cpucontrol mutex here */
550static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, 550static int workqueue_cpu_callback(struct notifier_block *nfb,
551 unsigned long action, 551 unsigned long action,
552 void *hcpu) 552 void *hcpu)
553{ 553{