path: root/kernel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c          13
-rw-r--r--  kernel/auditfilter.c    10
-rw-r--r--  kernel/auditsc.c        11
-rw-r--r--  kernel/cgroup.c         39
-rw-r--r--  kernel/cpuset.c         71
-rw-r--r--  kernel/dma-coherent.c    5
-rw-r--r--  kernel/exit.c            6
-rw-r--r--  kernel/irq/manage.c      3
-rw-r--r--  kernel/kgdb.c           94
-rw-r--r--  kernel/marker.c         12
-rw-r--r--  kernel/mutex.c           1
-rw-r--r--  kernel/pm_qos_params.c  16
-rw-r--r--  kernel/printk.c          8
-rw-r--r--  kernel/relay.c          12
-rw-r--r--  kernel/resource.c        2
-rw-r--r--  kernel/sched.c          44
-rw-r--r--  kernel/semaphore.c       4
-rw-r--r--  kernel/workqueue.c      13
18 files changed, 239 insertions, 125 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index e092f1c0ce30..4414e93d8750 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -707,12 +707,14 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (status_get->mask & AUDIT_STATUS_ENABLED) {
 		err = audit_set_enabled(status_get->enabled,
 					loginuid, sessionid, sid);
-		if (err < 0) return err;
+		if (err < 0)
+			return err;
 	}
 	if (status_get->mask & AUDIT_STATUS_FAILURE) {
 		err = audit_set_failure(status_get->failure,
 					loginuid, sessionid, sid);
-		if (err < 0) return err;
+		if (err < 0)
+			return err;
 	}
 	if (status_get->mask & AUDIT_STATUS_PID) {
 		int new_pid = status_get->pid;
@@ -725,9 +727,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		audit_pid = new_pid;
 		audit_nlk_pid = NETLINK_CB(skb).pid;
 	}
-	if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
+	if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
 		err = audit_set_rate_limit(status_get->rate_limit,
 					loginuid, sessionid, sid);
+		if (err < 0)
+			return err;
+	}
 	if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
 		err = audit_set_backlog_limit(status_get->backlog_limit,
 					loginuid, sessionid, sid);
@@ -1366,7 +1371,7 @@ int audit_string_contains_control(const char *string, size_t len)
 {
 	const unsigned char *p;
 	for (p = string; p < (const unsigned char *)string + len && *p; p++) {
-		if (*p == '"' || *p < 0x21 || *p > 0x7f)
+		if (*p == '"' || *p < 0x21 || *p > 0x7e)
 			return 1;
 	}
 	return 0;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 98c50cc671bb..b7d354e2b0ef 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1022,8 +1022,11 @@ static void audit_update_watch(struct audit_parent *parent,
 			struct audit_buffer *ab;
 			ab = audit_log_start(NULL, GFP_KERNEL,
 				AUDIT_CONFIG_CHANGE);
+			audit_log_format(ab, "auid=%u ses=%u",
+				audit_get_loginuid(current),
+				audit_get_sessionid(current));
 			audit_log_format(ab,
-				"op=updated rules specifying path=");
+				" op=updated rules specifying path=");
 			audit_log_untrustedstring(ab, owatch->path);
 			audit_log_format(ab, " with dev=%u ino=%lu\n",
 				dev, ino);
@@ -1058,7 +1061,10 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 			struct audit_buffer *ab;
 			ab = audit_log_start(NULL, GFP_KERNEL,
 				AUDIT_CONFIG_CHANGE);
-			audit_log_format(ab, "op=remove rule path=");
+			audit_log_format(ab, "auid=%u ses=%u",
+				audit_get_loginuid(current),
+				audit_get_sessionid(current));
+			audit_log_format(ab, " op=remove rule path=");
 			audit_log_untrustedstring(ab, w->path);
 			if (r->filterkey) {
 				audit_log_format(ab, " key=");
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 4699950e65bd..972f8e61d36a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -243,6 +243,9 @@ static inline int open_arg(int flags, int mask)
 
 static int audit_match_perm(struct audit_context *ctx, int mask)
 {
+	if (unlikely(!ctx))
+		return 0;
+
 	unsigned n = ctx->major;
 	switch (audit_classify_syscall(ctx->arch, n)) {
 	case 0:	/* native */
@@ -284,6 +287,10 @@ static int audit_match_filetype(struct audit_context *ctx, int which)
 {
 	unsigned index = which & ~S_IFMT;
 	mode_t mode = which & S_IFMT;
+
+	if (unlikely(!ctx))
+		return 0;
+
 	if (index >= ctx->name_count)
 		return 0;
 	if (ctx->names[index].ino == -1)
@@ -610,7 +617,7 @@ static int audit_filter_rules(struct task_struct *tsk,
 		if (!result)
 			return 0;
 	}
-	if (rule->filterkey)
+	if (rule->filterkey && ctx)
 		ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
 	switch (rule->action) {
 	case AUDIT_NEVER:    *state = AUDIT_DISABLED;	    break;
@@ -2375,7 +2382,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 	struct audit_context *ctx = tsk->audit_context;
 
 	if (audit_pid && t->tgid == audit_pid) {
-		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) {
+		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
 			audit_sig_pid = tsk->pid;
 			if (tsk->loginuid != -1)
 				audit_sig_uid = tsk->loginuid;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 657f8f8d93a5..13932abde159 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -355,6 +355,17 @@ static struct css_set *find_existing_css_set(
 	return NULL;
 }
 
+static void free_cg_links(struct list_head *tmp)
+{
+	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
+
+	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
+		list_del(&link->cgrp_link_list);
+		kfree(link);
+	}
+}
+
 /*
  * allocate_cg_links() allocates "count" cg_cgroup_link structures
  * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
@@ -363,17 +374,12 @@ static struct css_set *find_existing_css_set(
 static int allocate_cg_links(int count, struct list_head *tmp)
 {
 	struct cg_cgroup_link *link;
-	struct cg_cgroup_link *saved_link;
 	int i;
 	INIT_LIST_HEAD(tmp);
 	for (i = 0; i < count; i++) {
 		link = kmalloc(sizeof(*link), GFP_KERNEL);
 		if (!link) {
-			list_for_each_entry_safe(link, saved_link, tmp,
-						 cgrp_link_list) {
-				list_del(&link->cgrp_link_list);
-				kfree(link);
-			}
+			free_cg_links(tmp);
 			return -ENOMEM;
 		}
 		list_add(&link->cgrp_link_list, tmp);
@@ -381,17 +387,6 @@ static int allocate_cg_links(int count, struct list_head *tmp)
 	return 0;
 }
 
-static void free_cg_links(struct list_head *tmp)
-{
-	struct cg_cgroup_link *link;
-	struct cg_cgroup_link *saved_link;
-
-	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
-		list_del(&link->cgrp_link_list);
-		kfree(link);
-	}
-}
-
 /*
  * find_css_set() takes an existing cgroup group and a
  * cgroup object, and returns a css_set object that's
@@ -956,7 +951,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 	struct super_block *sb;
 	struct cgroupfs_root *root;
 	struct list_head tmp_cg_links;
-	INIT_LIST_HEAD(&tmp_cg_links);
 
 	/* First find the desired set of subsystems */
 	ret = parse_cgroupfs_options(data, &opts);
@@ -1424,14 +1418,17 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
 		if (buffer == NULL)
 			return -ENOMEM;
 	}
-	if (nbytes && copy_from_user(buffer, userbuf, nbytes))
-		return -EFAULT;
+	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
+		retval = -EFAULT;
+		goto out;
+	}
 
 	buffer[nbytes] = 0;     /* nul-terminate */
 	strstrip(buffer);
 	retval = cft->write_string(cgrp, cft, buffer);
 	if (!retval)
 		retval = nbytes;
+out:
 	if (buffer != local_buffer)
 		kfree(buffer);
 	return retval;
@@ -2371,7 +2368,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
 }
 
-static inline int cgroup_has_css_refs(struct cgroup *cgrp)
+static int cgroup_has_css_refs(struct cgroup *cgrp)
 {
 	/* Check the reference count on each subsystem. Since we
 	 * already established that there are no tasks in the
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 91cf85b36dd5..d5ab79cf516d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -54,7 +54,6 @@
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
 #include <linux/mutex.h>
-#include <linux/kfifo.h>
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
 
@@ -486,13 +485,38 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 static void
 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 {
-	if (!dattr)
-		return;
 	if (dattr->relax_domain_level < c->relax_domain_level)
 		dattr->relax_domain_level = c->relax_domain_level;
 	return;
 }
 
+static void
+update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+	LIST_HEAD(q);
+
+	list_add(&c->stack_list, &q);
+	while (!list_empty(&q)) {
+		struct cpuset *cp;
+		struct cgroup *cont;
+		struct cpuset *child;
+
+		cp = list_first_entry(&q, struct cpuset, stack_list);
+		list_del(q.next);
+
+		if (cpus_empty(cp->cpus_allowed))
+			continue;
+
+		if (is_sched_load_balance(cp))
+			update_domain_attr(dattr, cp);
+
+		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+			child = cgroup_cs(cont);
+			list_add_tail(&child->stack_list, &q);
+		}
+	}
+}
+
 /*
  * rebuild_sched_domains()
  *
@@ -532,7 +556,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
  * So the reverse nesting would risk an ABBA deadlock.
  *
  * The three key local variables below are:
- *    q  - a kfifo queue of cpuset pointers, used to implement a
+ *    q  - a linked-list queue of cpuset pointers, used to implement a
  *	   top-down scan of all cpusets.  This scan loads a pointer
  *	   to each cpuset marked is_sched_load_balance into the
  *	   array 'csa'.  For our purposes, rebuilding the schedulers
@@ -567,7 +591,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 
 void rebuild_sched_domains(void)
 {
-	struct kfifo *q;	/* queue of cpusets to be scanned */
+	LIST_HEAD(q);		/* queue of cpusets to be scanned*/
 	struct cpuset *cp;	/* scans q */
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
@@ -577,7 +601,6 @@ void rebuild_sched_domains(void)
 	int ndoms;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
-	q = NULL;
 	csa = NULL;
 	doms = NULL;
 	dattr = NULL;
@@ -591,35 +614,42 @@ void rebuild_sched_domains(void)
 		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 		if (dattr) {
 			*dattr = SD_ATTR_INIT;
-			update_domain_attr(dattr, &top_cpuset);
+			update_domain_attr_tree(dattr, &top_cpuset);
 		}
 		*doms = top_cpuset.cpus_allowed;
 		goto rebuild;
 	}
 
-	q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
-	if (IS_ERR(q))
-		goto done;
 	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
 	if (!csa)
 		goto done;
 	csn = 0;
 
-	cp = &top_cpuset;
-	__kfifo_put(q, (void *)&cp, sizeof(cp));
-	while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
+	list_add(&top_cpuset.stack_list, &q);
+	while (!list_empty(&q)) {
 		struct cgroup *cont;
 		struct cpuset *child;   /* scans child cpusets of cp */
 
+		cp = list_first_entry(&q, struct cpuset, stack_list);
+		list_del(q.next);
+
 		if (cpus_empty(cp->cpus_allowed))
 			continue;
 
-		if (is_sched_load_balance(cp))
+		/*
+		 * All child cpusets contain a subset of the parent's cpus, so
+		 * just skip them, and then we call update_domain_attr_tree()
+		 * to calc relax_domain_level of the corresponding sched
+		 * domain.
+		 */
+		if (is_sched_load_balance(cp)) {
 			csa[csn++] = cp;
+			continue;
+		}
 
 		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 			child = cgroup_cs(cont);
-			__kfifo_put(q, (void *)&child, sizeof(cp));
+			list_add_tail(&child->stack_list, &q);
 		}
 	}
 
@@ -686,7 +716,7 @@ restart:
 					cpus_or(*dp, *dp, b->cpus_allowed);
 					b->pn = -1;
 					if (dattr)
-						update_domain_attr(dattr
+						update_domain_attr_tree(dattr
 								   + nslot, b);
 				}
 			}
@@ -702,8 +732,6 @@ rebuild:
 	put_online_cpus();
 
 done:
-	if (q && !IS_ERR(q))
-		kfifo_free(q);
 	kfree(csa);
 	/* Don't kfree(doms) -- partition_sched_domains() does that. */
 	/* Don't kfree(dattr) -- partition_sched_domains() does that. */
@@ -1833,24 +1861,21 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
  */
 static void scan_for_empty_cpusets(const struct cpuset *root)
 {
+	LIST_HEAD(queue);
 	struct cpuset *cp;	/* scans cpusets being updated */
 	struct cpuset *child;	/* scans child cpusets of cp */
-	struct list_head queue;
 	struct cgroup *cont;
 	nodemask_t oldmems;
 
-	INIT_LIST_HEAD(&queue);
-
 	list_add_tail((struct list_head *)&root->stack_list, &queue);
 
 	while (!list_empty(&queue)) {
-		cp = container_of(queue.next, struct cpuset, stack_list);
+		cp = list_first_entry(&queue, struct cpuset, stack_list);
 		list_del(queue.next);
 		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 			child = cgroup_cs(cont);
 			list_add_tail(&child->stack_list, &queue);
 		}
-		cont = cp->css.cgroup;
 
 		/* Continue past cpusets with all cpus, mems online */
 		if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 7517115a8cce..91e96950cd52 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -77,15 +77,14 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 {
 	struct dma_coherent_mem *mem = dev->dma_mem;
 	int pos, err;
-	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
 
-	pages >>= PAGE_SHIFT;
+	size += device_addr & ~PAGE_MASK;
 
 	if (!mem)
 		return ERR_PTR(-EINVAL);
 
 	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
 	if (err != 0)
 		return ERR_PTR(err);
 	return mem->virt_base + (pos << PAGE_SHIFT);
diff --git a/kernel/exit.c b/kernel/exit.c
index eb4d6470d1d0..38ec40630149 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -911,10 +911,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 		tsk->exit_signal = SIGCHLD;
 
 	signal = tracehook_notify_death(tsk, &cookie, group_dead);
-	if (signal > 0)
+	if (signal >= 0)
 		signal = do_notify_parent(tsk, signal);
 
-	tsk->exit_state = signal < 0 ? EXIT_DEAD : EXIT_ZOMBIE;
+	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
 
 	/* mt-exec, de_thread() is waiting for us */
 	if (thread_group_leader(tsk) &&
@@ -927,7 +927,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	tracehook_report_death(tsk, signal, cookie, group_dead);
 
 	/* If the process is dead, release it - nobody will wait for it */
-	if (signal < 0)
+	if (signal == DEATH_REAP)
 		release_task(tsk);
 }
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 152abfd3589f..0314074fa232 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -323,7 +323,8 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
 	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
 
 	if (ret)
-		pr_err("setting flow type for irq %u failed (%pF)\n",
+		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
+				(int)(flags & IRQF_TRIGGER_MASK),
 				irq, chip->set_type);
 
 	return ret;
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 3ec23c3ec97f..eaa21fc9ad1d 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -56,12 +56,14 @@
 
 static int kgdb_break_asap;
 
+#define KGDB_MAX_THREAD_QUERY 17
 struct kgdb_state {
 	int ex_vector;
 	int signo;
 	int err_code;
 	int cpu;
 	int pass_exception;
+	unsigned long thr_query;
 	unsigned long threadid;
 	long kgdb_usethreadid;
 	struct pt_regs *linux_regs;
@@ -166,13 +168,6 @@ early_param("nokgdbroundup", opt_nokgdbroundup);
  * Weak aliases for breakpoint management,
  * can be overriden by architectures when needed:
  */
-int __weak kgdb_validate_break_address(unsigned long addr)
-{
-	char tmp_variable[BREAK_INSTR_SIZE];
-
-	return probe_kernel_read(tmp_variable, (char *)addr, BREAK_INSTR_SIZE);
-}
-
 int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
 {
 	int err;
@@ -191,6 +186,25 @@ int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
 				  (char *)bundle, BREAK_INSTR_SIZE);
 }
 
+int __weak kgdb_validate_break_address(unsigned long addr)
+{
+	char tmp_variable[BREAK_INSTR_SIZE];
+	int err;
+	/* Validate setting the breakpoint and then removing it.  In the
+	 * remove fails, the kernel needs to emit a bad message because we
+	 * are deep trouble not being able to put things back the way we
+	 * found them.
+	 */
+	err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+	if (err)
+		return err;
+	err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+	if (err)
+		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
+		   "memory destroyed at: %lx", addr);
+	return err;
+}
+
 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
 {
 	return instruction_pointer(regs);
@@ -433,9 +447,14 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
 {
 	int hex_val;
 	int num = 0;
+	int negate = 0;
 
 	*long_val = 0;
 
+	if (**ptr == '-') {
+		negate = 1;
+		(*ptr)++;
+	}
 	while (**ptr) {
 		hex_val = hex(**ptr);
 		if (hex_val < 0)
@@ -446,6 +465,9 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
 		(*ptr)++;
 	}
 
+	if (negate)
+		*long_val = -*long_val;
+
 	return num;
 }
 
@@ -515,10 +537,16 @@ static void int_to_threadref(unsigned char *id, int value)
 static struct task_struct *getthread(struct pt_regs *regs, int tid)
 {
 	/*
-	 * Non-positive TIDs are remapped idle tasks:
+	 * Non-positive TIDs are remapped to the cpu shadow information
 	 */
-	if (tid <= 0)
-		return idle_task(-tid);
+	if (tid == 0 || tid == -1)
+		tid = -atomic_read(&kgdb_active) - 2;
+	if (tid < 0) {
+		if (kgdb_info[-tid - 2].task)
+			return kgdb_info[-tid - 2].task;
+		else
+			return idle_task(-tid - 2);
+	}
 
 	/*
 	 * find_task_by_pid_ns() does not take the tasklist lock anymore
@@ -725,14 +753,15 @@ setundefined:
 }
 
 /*
- * Remap normal tasks to their real PID, idle tasks to -1 ... -NR_CPUs:
+ * Remap normal tasks to their real PID,
+ * CPU shadow threads are mapped to -CPU - 2
  */
 static inline int shadow_pid(int realpid)
 {
 	if (realpid)
 		return realpid;
 
-	return -1-raw_smp_processor_id();
+	return -raw_smp_processor_id() - 2;
 }
 
 static char gdbmsgbuf[BUFMAX + 1];
@@ -826,7 +855,7 @@ static void gdb_cmd_getregs(struct kgdb_state *ks)
 		local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
 	} else {
 		local_debuggerinfo = NULL;
-		for (i = 0; i < NR_CPUS; i++) {
+		for_each_online_cpu(i) {
 			/*
 			 * Try to find the task on some other
 			 * or possibly this node if we do not
@@ -960,10 +989,13 @@ static int gdb_cmd_reboot(struct kgdb_state *ks)
 /* Handle the 'q' query packets */
 static void gdb_cmd_query(struct kgdb_state *ks)
 {
-	struct task_struct *thread;
+	struct task_struct *g;
+	struct task_struct *p;
 	unsigned char thref[8];
 	char *ptr;
 	int i;
+	int cpu;
+	int finished = 0;
 
 	switch (remcom_in_buffer[1]) {
 	case 's':
@@ -973,22 +1005,34 @@ static void gdb_cmd_query(struct kgdb_state *ks)
 			break;
 		}
 
-		if (remcom_in_buffer[1] == 'f')
-			ks->threadid = 1;
-
+		i = 0;
 		remcom_out_buffer[0] = 'm';
 		ptr = remcom_out_buffer + 1;
-
-		for (i = 0; i < 17; ks->threadid++) {
-			thread = getthread(ks->linux_regs, ks->threadid);
-			if (thread) {
-				int_to_threadref(thref, ks->threadid);
+		if (remcom_in_buffer[1] == 'f') {
+			/* Each cpu is a shadow thread */
+			for_each_online_cpu(cpu) {
+				ks->thr_query = 0;
+				int_to_threadref(thref, -cpu - 2);
 				pack_threadid(ptr, thref);
 				ptr += BUF_THREAD_ID_SIZE;
 				*(ptr++) = ',';
 				i++;
 			}
 		}
+
+		do_each_thread(g, p) {
+			if (i >= ks->thr_query && !finished) {
+				int_to_threadref(thref, p->pid);
+				pack_threadid(ptr, thref);
+				ptr += BUF_THREAD_ID_SIZE;
+				*(ptr++) = ',';
+				ks->thr_query++;
+				if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
+					finished = 1;
+			}
+			i++;
+		} while_each_thread(g, p);
+
 		*(--ptr) = '\0';
 		break;
 
@@ -1011,15 +1055,15 @@ static void gdb_cmd_query(struct kgdb_state *ks)
 			error_packet(remcom_out_buffer, -EINVAL);
 			break;
 		}
-		if (ks->threadid > 0) {
+		if ((int)ks->threadid > 0) {
 			kgdb_mem2hex(getthread(ks->linux_regs,
 					ks->threadid)->comm,
 					remcom_out_buffer, 16);
 		} else {
 			static char tmpstr[23 + BUF_THREAD_ID_SIZE];
 
-			sprintf(tmpstr, "Shadow task %d for pid 0",
-				(int)(-ks->threadid-1));
+			sprintf(tmpstr, "shadowCPU%d",
+				(int)(-ks->threadid - 2));
 			kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
 		}
 		break;
diff --git a/kernel/marker.c b/kernel/marker.c
index 971da5317903..7d1faecd7a51 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -126,6 +126,11 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
 		struct marker_probe_closure *multi;
 		int i;
 		/*
+		 * Read mdata->ptype before mdata->multi.
+		 */
+		smp_rmb();
+		multi = mdata->multi;
+		/*
 		 * multi points to an array, therefore accessing the array
 		 * depends on reading multi. However, even in this case,
 		 * we must insure that the pointer is read _before_ the array
@@ -133,7 +138,6 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
 		 * in the fast path, so put the explicit barrier here.
 		 */
 		smp_read_barrier_depends();
-		multi = mdata->multi;
 		for (i = 0; multi[i].func; i++) {
 			va_start(args, call_private);
 			multi[i].func(multi[i].probe_private, call_private,
@@ -175,6 +179,11 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 		struct marker_probe_closure *multi;
 		int i;
 		/*
+		 * Read mdata->ptype before mdata->multi.
+		 */
+		smp_rmb();
+		multi = mdata->multi;
+		/*
 		 * multi points to an array, therefore accessing the array
 		 * depends on reading multi. However, even in this case,
 		 * we must insure that the pointer is read _before_ the array
@@ -182,7 +191,6 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 		 * in the fast path, so put the explicit barrier here.
 		 */
 		smp_read_barrier_depends();
-		multi = mdata->multi;
 		for (i = 0; multi[i].func; i++)
 			multi[i].func(multi[i].probe_private, call_private,
 				mdata->format, &args);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index bcdc9ac8ef60..12c779dc65d4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -34,6 +34,7 @@
 /***
  * mutex_init - initialize the mutex
  * @lock: the mutex to be initialized
+ * @key: the lock_class_key for the class; used by mutex lock debugging
  *
  * Initialize the mutex to unlocked state.
  *
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 8cb757026386..da9c2dda6a4e 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -24,7 +24,7 @@
  * requirement that the application has is cleaned up when closes the file
  * pointer or exits the pm_qos_object will get an opportunity to clean up.
  *
- * mark gross mgross@linux.intel.com
+ * Mark Gross <mgross@linux.intel.com>
  */
 
 #include <linux/pm_qos_params.h>
@@ -211,8 +211,8 @@ EXPORT_SYMBOL_GPL(pm_qos_requirement);
  * @value: defines the qos request
  *
  * This function inserts a new entry in the pm_qos_class list of requested qos
- * performance charactoistics. It recomputes the agregate QoS expectations for
- * the pm_qos_class of parrameters.
+ * performance characteristics. It recomputes the aggregate QoS expectations
+ * for the pm_qos_class of parameters.
  */
 int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
 {
@@ -250,10 +250,10 @@ EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
  * @name: identifies the request
  * @value: defines the qos request
  *
- * Updates an existing qos requierement for the pm_qos_class of parameters along
+ * Updates an existing qos requirement for the pm_qos_class of parameters along
  * with updating the target pm_qos_class value.
  *
- * If the named request isn't in the lest then no change is made.
+ * If the named request isn't in the list then no change is made.
  */
 int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
 {
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
  * @pm_qos_class: identifies which list of qos request to us
  * @name: identifies the request
  *
- * Will remove named qos request from pm_qos_class list of parrameters and
+ * Will remove named qos request from pm_qos_class list of parameters and
  * recompute the current target value for the pm_qos_class.
  */
 void pm_qos_remove_requirement(int pm_qos_class, char *name)
@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
  * @notifier: notifier block managed by caller.
  *
  * will register the notifier into a notification chain that gets called
- * uppon changes to the pm_qos_class target value.
+ * upon changes to the pm_qos_class target value.
  */
  int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
 {
@@ -338,7 +338,7 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
  * @notifier: notifier block to be removed.
  *
  * will remove the notifier from the notification chain that gets called
- * uppon changes to the pm_qos_class target value.
+ * upon changes to the pm_qos_class target value.
  */
 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
 {
diff --git a/kernel/printk.c b/kernel/printk.c
index a7f7559c5f6c..b51b1567bb55 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1309,14 +1309,14 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 
 #if defined CONFIG_PRINTK
 
-DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
 /*
  * printk rate limiting, lifted from the networking subsystem.
  *
- * This enforces a rate limit: not more than one kernel message
- * every printk_ratelimit_jiffies to make a denial-of-service
- * attack impossible.
+ * This enforces a rate limit: not more than 10 kernel messages
+ * every 5s to make a denial-of-service attack impossible.
  */
+DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
+
 int printk_ratelimit(void)
 {
 	return __ratelimit(&printk_ratelimit_state);
diff --git a/kernel/relay.c b/kernel/relay.c
index 04006ef970b8..8d13a7855c08 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -944,6 +944,10 @@ static void relay_file_read_consume(struct rchan_buf *buf,
 	size_t n_subbufs = buf->chan->n_subbufs;
 	size_t read_subbuf;
 
+	if (buf->subbufs_produced == buf->subbufs_consumed &&
+	    buf->offset == buf->bytes_consumed)
+		return;
+
 	if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
 		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
 		buf->bytes_consumed = 0;
@@ -975,6 +979,8 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
 
 	relay_file_read_consume(buf, read_pos, 0);
 
+	consumed = buf->subbufs_consumed;
+
 	if (unlikely(buf->offset > subbuf_size)) {
 		if (produced == consumed)
 			return 0;
@@ -993,8 +999,12 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
 	if (consumed > produced)
 		produced += n_subbufs * subbuf_size;
 
-	if (consumed == produced)
+	if (consumed == produced) {
+		if (buf->offset == subbuf_size &&
+		    buf->subbufs_produced > buf->subbufs_consumed)
+			return 1;
 		return 0;
+	}
 
 	return 1;
 }
diff --git a/kernel/resource.c b/kernel/resource.c
index 74af2d7cb5a1..f5b518eabefe 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -490,7 +490,7 @@ resource_size_t resource_alignment(struct resource *res)
 {
 	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
 	case IORESOURCE_SIZEALIGN:
-		return res->end - res->start + 1;
+		return resource_size(res);
 	case IORESOURCE_STARTALIGN:
 		return res->start;
 	default:
diff --git a/kernel/sched.c b/kernel/sched.c
index 0d1717b00225..f0141947c7d5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5004,19 +5004,21 @@ recheck:
 			return -EPERM;
 	}
 
+	if (user) {
 #ifdef CONFIG_RT_GROUP_SCHED
 	/*
 	 * Do not allow realtime tasks into groups that have no runtime
 	 * assigned.
 	 */
-	if (user
-	    && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
-		return -EPERM;
+	if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
+		return -EPERM;
 #endif
 
 	retval = security_task_setscheduler(p, policy, param);
 	if (retval)
 		return retval;
+	}
+
 	/*
 	 * make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
@@ -7671,34 +7673,34 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 }
 
 #ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *page)
+static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
+					   char *page)
 {
 	return sprintf(page, "%u\n", sched_mc_power_savings);
 }
-static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
-					    struct sysdev_attribute *attr,
+static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
 }
-static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
-		   sched_mc_power_savings_store);
+static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
+			 sched_mc_power_savings_show,
+			 sched_mc_power_savings_store);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *page)
+static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
+					    char *page)
 {
 	return sprintf(page, "%u\n", sched_smt_power_savings);
 }
-static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
-					     struct sysdev_attribute *attr,
+static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
 					     const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
 }
-static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
+			 sched_smt_power_savings_show,
 		   sched_smt_power_savings_store);
 #endif
 
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index aaaeae8244e7..94a62c0d4ade 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -212,9 +212,7 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 	waiter.up = 0;
 
 	for (;;) {
-		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
-			goto interrupted;
-		if (state == TASK_KILLABLE && fatal_signal_pending(task))
+		if (signal_pending_state(state, task))
 			goto interrupted;
 		if (timeout <= 0)
 			goto timed_out;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ec7e4f62aaff..4a26a1382df0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -830,10 +830,21 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		start_workqueue_thread(cwq, -1);
 	} else {
 		cpu_maps_update_begin();
+		/*
+		 * We must place this wq on list even if the code below fails.
+		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
+		 * destroy_workqueue() takes the lock, in that case we leak
+		 * cwq[cpu]->thread.
+		 */
 		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
 		spin_unlock(&workqueue_lock);
-
+		/*
+		 * We must initialize cwqs for each possible cpu even if we
+		 * are going to call destroy_workqueue() finally. Otherwise
+		 * cpu_up() can hit the uninitialized cwq once we drop the
+		 * lock.
+		 */
 		for_each_possible_cpu(cpu) {
 			cwq = init_cpu_workqueue(wq, cpu);
 			if (err || !cpu_online(cpu))