Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c              |  6
-rw-r--r--  kernel/audit.h              |  1
-rw-r--r--  kernel/auditfilter.c        | 37
-rw-r--r--  kernel/auditsc.c            | 51
-rw-r--r--  kernel/delayacct.c          | 16
-rw-r--r--  kernel/exit.c               |  3
-rw-r--r--  kernel/fork.c               |  6
-rw-r--r--  kernel/futex.c              | 84
-rw-r--r--  kernel/irq/handle.c         |  5
-rw-r--r--  kernel/lockdep_internals.h  |  2
-rw-r--r--  kernel/panic.c              |  2
-rw-r--r--  kernel/power/Kconfig        |  6
-rw-r--r--  kernel/rcupdate.c           |  6
-rw-r--r--  kernel/spinlock.c           |  2
14 files changed, 124 insertions, 103 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 0a36091ed712..963fd15c9621 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1028,6 +1028,9 @@ void audit_log_hex(struct audit_buffer *ab, const unsigned char *buf,
 	struct sk_buff *skb;
 	static const unsigned char *hex = "0123456789ABCDEF";
 
+	if (!ab)
+		return;
+
 	BUG_ON(!ab->skb);
 	skb = ab->skb;
 	avail = skb_tailroom(skb);
@@ -1060,6 +1063,9 @@ static void audit_log_n_string(struct audit_buffer *ab, size_t slen,
 	unsigned char *ptr;
 	struct sk_buff *skb;
 
+	if (!ab)
+		return;
+
 	BUG_ON(!ab->skb);
 	skb = ab->skb;
 	avail = skb_tailroom(skb);
diff --git a/kernel/audit.h b/kernel/audit.h
index 6aa33b848cf2..a3370232a390 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -104,6 +104,7 @@ static inline int audit_hash_ino(u32 ino)
 	return (ino & (AUDIT_INODE_BUCKETS-1));
 }
 
+extern int audit_match_class(int class, unsigned syscall);
 extern int audit_comparator(const u32 left, const u32 op, const u32 right);
 extern int audit_compare_dname_path(const char *dname, const char *path,
 				    int *dirlen);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 6a9a5c5a4e7d..a44879b0c72f 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -302,6 +302,15 @@ int __init audit_register_class(int class, unsigned *list)
 	return 0;
 }
 
+int audit_match_class(int class, unsigned syscall)
+{
+	if (unlikely(syscall >= AUDIT_BITMASK_SIZE * sizeof(__u32)))
+		return 0;
+	if (unlikely(class >= AUDIT_SYSCALL_CLASSES || !classes[class]))
+		return 0;
+	return classes[class][AUDIT_WORD(syscall)] & AUDIT_BIT(syscall);
+}
+
 /* Common user-space to kernel rule translation. */
 static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
 {
@@ -404,6 +413,7 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 		case AUDIT_PERS:
 		case AUDIT_ARCH:
 		case AUDIT_MSGTYPE:
+		case AUDIT_PPID:
 		case AUDIT_DEVMAJOR:
 		case AUDIT_DEVMINOR:
 		case AUDIT_EXIT:
@@ -413,6 +423,10 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 		case AUDIT_ARG2:
 		case AUDIT_ARG3:
 			break;
+		case AUDIT_PERM:
+			if (f->val & ~15)
+				goto exit_free;
+			break;
 		case AUDIT_INODE:
 			err = audit_to_inode(&entry->rule, f);
 			if (err)
@@ -567,6 +581,10 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 			entry->rule.buflen += f->val;
 			entry->rule.filterkey = str;
 			break;
+		case AUDIT_PERM:
+			if (f->val & ~15)
+				goto exit_free;
+			break;
 		default:
 			goto exit_free;
 		}
@@ -913,7 +931,7 @@ static void audit_update_watch(struct audit_parent *parent,
 	}
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
-	audit_log_format(ab, "audit updated rules specifying watch=");
+	audit_log_format(ab, "audit updated rules specifying path=");
 	audit_log_untrustedstring(ab, owatch->path);
 	audit_log_format(ab, " with dev=%u ino=%lu\n", dev, ino);
 	audit_log_end(ab);
@@ -936,19 +954,28 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 	struct audit_watch *w, *nextw;
 	struct audit_krule *r, *nextr;
 	struct audit_entry *e;
+	struct audit_buffer *ab;
 
 	mutex_lock(&audit_filter_mutex);
 	parent->flags |= AUDIT_PARENT_INVALID;
 	list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
 		list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
 			e = container_of(r, struct audit_entry, rule);
+
+			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+			audit_log_format(ab, "audit implicitly removed rule path=");
+			audit_log_untrustedstring(ab, w->path);
+			if (r->filterkey) {
+				audit_log_format(ab, " key=");
+				audit_log_untrustedstring(ab, r->filterkey);
+			} else
+				audit_log_format(ab, " key=(null)");
+			audit_log_format(ab, " list=%d", r->listnr);
+			audit_log_end(ab);
+
 			list_del(&r->rlist);
 			list_del_rcu(&e->list);
 			call_rcu(&e->rcu, audit_free_rule_rcu);
-
-			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-				  "audit implicitly removed rule from list=%d\n",
-				  AUDIT_FILTER_EXIT);
 		}
 		audit_remove_watch(w);
 	}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index efc1b74bebf3..1bd8827a0102 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -209,6 +209,54 @@ struct audit_context {
 #endif
 };
 
+#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
+static inline int open_arg(int flags, int mask)
+{
+	int n = ACC_MODE(flags);
+	if (flags & (O_TRUNC | O_CREAT))
+		n |= AUDIT_PERM_WRITE;
+	return n & mask;
+}
+
+static int audit_match_perm(struct audit_context *ctx, int mask)
+{
+	unsigned n = ctx->major;
+	switch (audit_classify_syscall(ctx->arch, n)) {
+	case 0: /* native */
+		if ((mask & AUDIT_PERM_WRITE) &&
+		     audit_match_class(AUDIT_CLASS_WRITE, n))
+			return 1;
+		if ((mask & AUDIT_PERM_READ) &&
+		     audit_match_class(AUDIT_CLASS_READ, n))
+			return 1;
+		if ((mask & AUDIT_PERM_ATTR) &&
+		     audit_match_class(AUDIT_CLASS_CHATTR, n))
+			return 1;
+		return 0;
+	case 1: /* 32bit on biarch */
+		if ((mask & AUDIT_PERM_WRITE) &&
+		     audit_match_class(AUDIT_CLASS_WRITE_32, n))
+			return 1;
+		if ((mask & AUDIT_PERM_READ) &&
+		     audit_match_class(AUDIT_CLASS_READ_32, n))
+			return 1;
+		if ((mask & AUDIT_PERM_ATTR) &&
+		     audit_match_class(AUDIT_CLASS_CHATTR_32, n))
+			return 1;
+		return 0;
+	case 2: /* open */
+		return mask & ACC_MODE(ctx->argv[1]);
+	case 3: /* openat */
+		return mask & ACC_MODE(ctx->argv[2]);
+	case 4: /* socketcall */
+		return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND);
+	case 5: /* execve */
+		return mask & AUDIT_PERM_EXEC;
+	default:
+		return 0;
+	}
+}
+
 /* Determine if any context name data matches a rule's watch data */
 /* Compare a task_struct with an audit_rule. Return 1 on match, 0
  * otherwise. */
@@ -397,6 +445,9 @@ static int audit_filter_rules(struct task_struct *tsk,
 			/* ignore this field for filtering */
 			result = 1;
 			break;
+		case AUDIT_PERM:
+			result = audit_match_perm(ctx, f->val);
+			break;
 		}
 
 		if (!result)
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 57ca3730205d..36752f124c6a 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -41,24 +41,11 @@ void delayacct_init(void)
 
 void __delayacct_tsk_init(struct task_struct *tsk)
 {
-	spin_lock_init(&tsk->delays_lock);
-	/* No need to acquire tsk->delays_lock for allocation here unless
-	   __delayacct_tsk_init called after tsk is attached to tasklist
-	*/
 	tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL);
 	if (tsk->delays)
 		spin_lock_init(&tsk->delays->lock);
 }
 
-void __delayacct_tsk_exit(struct task_struct *tsk)
-{
-	struct task_delay_info *delays = tsk->delays;
-	spin_lock(&tsk->delays_lock);
-	tsk->delays = NULL;
-	spin_unlock(&tsk->delays_lock);
-	kmem_cache_free(delayacct_cache, delays);
-}
-
 /*
  * Start accounting for a delay statistic using
  * its starting timestamp (@start)
@@ -118,8 +105,6 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	struct timespec ts;
 	unsigned long t1,t2,t3;
 
-	spin_lock(&tsk->delays_lock);
-
 	/* Though tsk->delays accessed later, early exit avoids
 	 * unnecessary returning of other data
 	 */
@@ -161,7 +146,6 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	spin_unlock(&tsk->delays->lock);
 
 done:
-	spin_unlock(&tsk->delays_lock);
 	return 0;
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index dba194a8d416..d891883420f7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -908,7 +908,6 @@ fastcall NORET_TYPE void do_exit(long code)
 		audit_free(tsk);
 	taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
 	taskstats_exit_free(tidstats);
-	delayacct_tsk_exit(tsk);
 
 	exit_mm(tsk);
 
@@ -1054,7 +1053,7 @@ static int eligible_child(pid_t pid, int options, struct task_struct *p)
 	 * Do not consider thread group leaders that are
 	 * in a non-empty thread group:
 	 */
-	if (current->tgid != p->tgid && delay_group_leader(p))
+	if (delay_group_leader(p))
 		return 2;
 
 	if (security_task_wait(p))
diff --git a/kernel/fork.c b/kernel/fork.c
index aa36c43783cc..f9b014e3e700 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -117,6 +117,7 @@ void __put_task_struct(struct task_struct *tsk)
 	security_task_free(tsk);
 	free_uid(tsk->user);
 	put_group_info(tsk->group_info);
+	delayacct_tsk_free(tsk);
 
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
@@ -1011,7 +1012,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	retval = -EFAULT;
 	if (clone_flags & CLONE_PARENT_SETTID)
 		if (put_user(p->pid, parent_tidptr))
-			goto bad_fork_cleanup;
+			goto bad_fork_cleanup_delays_binfmt;
 
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
@@ -1277,7 +1278,8 @@ bad_fork_cleanup_policy:
 bad_fork_cleanup_cpuset:
 #endif
 	cpuset_exit(p);
-bad_fork_cleanup:
+bad_fork_cleanup_delays_binfmt:
+	delayacct_tsk_free(p);
 	if (p->binfmt)
 		module_put(p->binfmt->module);
 bad_fork_cleanup_put_domain:
diff --git a/kernel/futex.c b/kernel/futex.c
index b9b8aea5389e..9d260e838cff 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1120,9 +1120,10 @@ static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
-			    struct hrtimer_sleeper *to)
+static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
+			 long nsec, int trylock)
 {
+	struct hrtimer_sleeper timeout, *to = NULL;
 	struct task_struct *curr = current;
 	struct futex_hash_bucket *hb;
 	u32 uval, newval, curval;
@@ -1132,6 +1133,13 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 	if (refill_pi_state_cache())
 		return -ENOMEM;
 
+	if (sec != MAX_SCHEDULE_TIMEOUT) {
+		to = &timeout;
+		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
+		hrtimer_init_sleeper(to, current);
+		to->timer.expires = ktime_set(sec, nsec);
+	}
+
 	q.pi_state = NULL;
 retry:
 	down_read(&curr->mm->mmap_sem);
@@ -1307,7 +1315,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 	if (!detect && ret == -EDEADLK && 0)
 		force_sig(SIGKILL, current);
 
-	return ret;
+	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
 out_unlock_release_sem:
 	queue_unlock(&q, hb);
@@ -1342,76 +1350,6 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
 }
 
 /*
- * Restart handler
- */
-static long futex_lock_pi_restart(struct restart_block *restart)
-{
-	struct hrtimer_sleeper timeout, *to = NULL;
-	int ret;
-
-	restart->fn = do_no_restart_syscall;
-
-	if (restart->arg2 || restart->arg3) {
-		to = &timeout;
-		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
-		hrtimer_init_sleeper(to, current);
-		to->timer.expires.tv64 = ((u64)restart->arg1 << 32) |
-			(u64) restart->arg0;
-	}
-
-	pr_debug("lock_pi restart: %p, %d (%d)\n",
-		 (u32 __user *)restart->arg0, current->pid);
-
-	ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
-			       0, to);
-
-	if (ret != -EINTR)
-		return ret;
-
-	restart->fn = futex_lock_pi_restart;
-
-	/* The other values are filled in */
-	return -ERESTART_RESTARTBLOCK;
-}
-
-/*
- * Called from the syscall entry below.
- */
-static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
-			 long nsec, int trylock)
-{
-	struct hrtimer_sleeper timeout, *to = NULL;
-	struct restart_block *restart;
-	int ret;
-
-	if (sec != MAX_SCHEDULE_TIMEOUT) {
-		to = &timeout;
-		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
-		hrtimer_init_sleeper(to, current);
-		to->timer.expires = ktime_set(sec, nsec);
-	}
-
-	ret = do_futex_lock_pi(uaddr, detect, trylock, to);
-
-	if (ret != -EINTR)
-		return ret;
-
-	pr_debug("lock_pi interrupted: %p, %d (%d)\n", uaddr, current->pid);
-
-	restart = &current_thread_info()->restart_block;
-	restart->fn = futex_lock_pi_restart;
-	restart->arg0 = (unsigned long) uaddr;
-	restart->arg1 = detect;
-	if (to) {
-		restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
-		restart->arg3 = to->timer.expires.tv64 >> 32;
-	} else
-		restart->arg2 = restart->arg3 = 0;
-
-	return -ERESTART_RESTARTBLOCK;
-}
-
-/*
  * Userspace attempted a TID -> 0 atomic transition, and failed.
  * This is the in-kernel slowpath: we look up the PI state (if any),
  * and do the rt-mutex unlock.
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index fc4e906aedbd..48a53f68af96 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -20,6 +20,11 @@
 
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
+ * @irq:   the interrupt number
+ * @desc:  description of the interrupt
+ * @regs:  pointer to a register structure
+ *
+ * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
  */
 void fastcall
 handle_bad_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 0d355f24fe04..eab043c83bb2 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -27,7 +27,7 @@
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
-#define MAX_STACK_TRACE_ENTRIES 131072UL
+#define MAX_STACK_TRACE_ENTRIES 262144UL
 
 extern struct list_head all_lock_classes;
 
diff --git a/kernel/panic.c b/kernel/panic.c
index 9b8dcfd1ca93..8010b9b17aca 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -173,7 +173,7 @@ const char *print_tainted(void)
 
 void add_taint(unsigned flag)
 {
-	debug_locks_off(); /* can't trust the integrity of the kernel anymore */
+	debug_locks = 0; /* can't trust the integrity of the kernel anymore */
 	tainted |= flag;
 }
 EXPORT_SYMBOL(add_taint);
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index ae44a70aae8a..619ecabf7c58 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -56,7 +56,7 @@ config PM_TRACE
 
 config SOFTWARE_SUSPEND
 	bool "Software Suspend"
-	depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
+	depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP) && !X86_PAE) || ((FRV || PPC32) && !SMP))
 	---help---
 	  Enable the possibility of suspending the machine.
 	  It doesn't need ACPI or APM.
@@ -78,6 +78,10 @@ config SOFTWARE_SUSPEND
 
 	  For more information take a look at <file:Documentation/power/swsusp.txt>.
 
+	  (For now, swsusp is incompatible with PAE aka HIGHMEM_64G on i386.
+	  we need identity mapping for resume to work, and that is trivial
+	  to get with 4MB pages, but less than trivial on PAE).
+
 config PM_STD_PARTITION
 	string "Default resume partition"
 	depends on SOFTWARE_SUSPEND
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 436ab35f6fa7..523e46483b99 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -241,12 +241,16 @@ static void rcu_do_batch(struct rcu_data *rdp)
 		next = rdp->donelist = list->next;
 		list->func(list);
 		list = next;
-		rdp->qlen--;
 		if (++count >= rdp->blimit)
 			break;
 	}
+
+	local_irq_disable();
+	rdp->qlen -= count;
+	local_irq_enable();
 	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;
+
 	if (!rdp->donelist)
 		rdp->donetail = &rdp->donelist;
 	else
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index bfd6ad9c0330..fb524b009eef 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(_write_trylock);
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
 #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
-	defined(CONFIG_PROVE_LOCKING)
+	defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {