 fs/exec.c                | 6 ++++++
 include/linux/sched.h    | 4 ++++
 kernel/sched.c           | 5 +----
 kernel/sched_debug.c     | 8 +++++---
 kernel/sched_stats.h     | 3 ++-
 kernel/time/tick-sched.c | 2 ++
 mm/page_alloc.c          | 1 -
 mm/shmem.c               | 5 +++--
 8 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 4ccaaa4b13b2..282240afe99e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1780,6 +1780,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 	   but keep the previous behaviour for now. */
 	if (!ispipe && !S_ISREG(inode->i_mode))
 		goto close_fail;
+	/*
+	 * Dont allow local users get cute and trick others to coredump
+	 * into their pre-created files:
+	 */
+	if (inode->i_uid != current->fsuid)
+		goto close_fail;
 	if (!file->f_op)
 		goto close_fail;
 	if (!file->f_op->write)
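
The hunk above makes do_coredump() skip the dump when the target file already exists but is not owned by the dumping task's fsuid, which is the "pre-created files" trick the new comment refers to. Below is a minimal userspace sketch of the same ownership check; dump_to_path() is an illustrative helper, not an existing API, and only standard libc calls (open, fstat, geteuid, write) are used.

/* Userspace sketch: refuse to write a dump into a pre-existing file
 * owned by someone else. Illustrative only. */
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int dump_to_path(const char *path, const void *buf, size_t len)
{
	struct stat st;
	int fd = open(path, O_WRONLY | O_CREAT, 0600);

	if (fd < 0)
		return -1;
	/* Like the new do_coredump() check: bail out if the file we
	 * opened is not owned by our effective uid. */
	if (fstat(fd, &st) < 0 || st.st_uid != geteuid()) {
		close(fd);
		return -1;
	}
	if (write(fd, buf, len) != (ssize_t)len) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	static const char msg[] = "core data\n";

	return dump_to_path("core.test", msg, sizeof(msg) - 1) ? 1 : 0;
}
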
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ee800e7a70de..ac3d496fbd20 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -282,6 +282,10 @@ static inline void touch_all_softlockup_watchdogs(void)
 
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched __attribute__((__section__(".sched.text")))
+
+/* Linker adds these: start and end of __sched functions */
+extern char __sched_text_start[], __sched_text_end[];
+
 /* Is this address in the __sched functions? */
 extern int in_sched_functions(unsigned long addr);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 38933cafea8a..98dcdf272db3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5466,7 +5466,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 	return table;
 }
 
-static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 {
 	struct ctl_table *entry, *table;
 	struct sched_domain *sd;
@@ -6708,9 +6708,6 @@ void __init sched_init_smp(void)
 
 int in_sched_functions(unsigned long addr)
 {
-	/* Linker adds these: start and end of __sched functions */
-	extern char __sched_text_start[], __sched_text_end[];
-
 	return in_lock_functions(addr) ||
 		(addr >= (unsigned long)__sched_text_start
 		&& addr < (unsigned long)__sched_text_end);
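
Together with the include/linux/sched.h hunk, this moves the __sched_text_start/__sched_text_end declarations out of in_sched_functions() and into the header, so other code can test addresses against the bounds of the .sched.text section. The sketch below shows the general section-bounds pattern in userspace C; it leans on GNU ld's automatic __start_/__stop_ symbols for a section whose name is a C identifier, whereas the kernel defines its boundary symbols in its linker script, so treat the names and mechanism here as an approximation.

/* Sketch: tag functions with a section attribute and test whether an
 * address falls inside that section, like in_sched_functions(). */
#include <stdio.h>

#define __sched __attribute__((__section__("sched_text")))

/* GNU ld provides these automatically for the "sched_text" section. */
extern char __start_sched_text[], __stop_sched_text[];

static void __sched schedule_like(void)
{
}

static int in_sched_functions(unsigned long addr)
{
	return addr >= (unsigned long)__start_sched_text &&
	       addr <  (unsigned long)__stop_sched_text;
}

int main(void)
{
	printf("schedule_like in section: %d\n",
	       in_sched_functions((unsigned long)schedule_like));
	printf("main in section:          %d\n",
	       in_sched_functions((unsigned long)main));
	return 0;
}
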
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5d0d623a5465..d30467b47ddd 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -327,10 +327,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 		avg_atom = -1LL;
 
 	avg_per_cpu = p->se.sum_exec_runtime;
-	if (p->se.nr_migrations)
-		avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
-	else
+	if (p->se.nr_migrations) {
+		avg_per_cpu = div64_64(avg_per_cpu,
+				       p->se.nr_migrations);
+	} else {
 		avg_per_cpu = -1LL;
+	}
 
 	__PN(avg_atom);
 	__PN(avg_per_cpu);
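
The sched_debug.c hunk only adds braces and re-wraps the div64_64() call; the logic is unchanged: divide the accumulated runtime by the migration count when it is non-zero, otherwise report -1. A plain C sketch of that guarded average, assuming native 64-bit division is available (the kernel goes through div64_64() because it cannot assume that on every 32-bit target):

#include <stdint.h>
#include <stdio.h>

/* Guarded average: only divide when the migration count is non-zero,
 * otherwise return -1 as "no data". */
static int64_t avg_per_cpu(uint64_t sum_exec_runtime, uint64_t nr_migrations)
{
	if (nr_migrations)
		return (int64_t)(sum_exec_runtime / nr_migrations);
	return -1LL;
}

int main(void)
{
	printf("%lld\n", (long long)avg_per_cpu(3000000ULL, 4ULL));
	printf("%lld\n", (long long)avg_per_cpu(3000000ULL, 0ULL));
	return 0;
}
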
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 630178e53bb6..5b32433e7ee5 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -52,7 +52,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 			    sd->lb_nobusyq[itype],
 			    sd->lb_nobusyg[itype]);
 		}
-		seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+		seq_printf(seq,
+			   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
 		    sd->alb_count, sd->alb_failed, sd->alb_pushed,
 		    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
 		    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 27a2338deb4a..cb89fa8db110 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -133,6 +133,8 @@ void tick_nohz_update_jiffies(void)
 	if (!ts->tick_stopped)
 		return;
 
+	touch_softlockup_watchdog();
+
 	cpu_clear(cpu, nohz_cpu_mask);
 	now = ktime_get();
 
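
The added touch_softlockup_watchdog() call refreshes the soft-lockup detector's timestamp when a CPU updates jiffies after a tickless idle period, which reads as a guard against that long but legitimate pause being reported as a lockup. A minimal userspace sketch of the touch-a-timestamp pattern, with purely illustrative names:

#include <stdio.h>
#include <time.h>

static time_t last_touch;

/* The watched code refreshes its timestamp... */
static void touch_watchdog(void)
{
	last_touch = time(NULL);
}

/* ...and a watcher flags a lockup when the timestamp goes stale. */
static int watchdog_fired(int threshold_seconds)
{
	return time(NULL) - last_touch > threshold_seconds;
}

int main(void)
{
	touch_watchdog();
	/* ...a long, legitimate idle period would elapse here... */
	touch_watchdog();	/* like tick_nohz_update_jiffies() above */
	printf("lockup reported: %d\n", watchdog_fired(10));
	return 0;
}
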
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 12376ae3f733..4ffed1cd158b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -305,7 +305,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 {
 	int i;
 
-	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
 	/*
 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
diff --git a/mm/shmem.c b/mm/shmem.c
index 253d205914ba..51b3d6ccddab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1072,7 +1072,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	pvma.vm_pgoff = idx;
 	pvma.vm_end = PAGE_SIZE;
-	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
+	page = alloc_page_vma(gfp, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
@@ -1093,7 +1093,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
 static inline struct page *
 shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
 {
-	return alloc_page(gfp | __GFP_ZERO);
+	return alloc_page(gfp);
 }
 #endif
 
@@ -1306,6 +1306,7 @@ repeat:
 
 		info->alloced++;
 		spin_unlock(&info->lock);
+		clear_highpage(filepage);
 		flush_dcache_page(filepage);
 		SetPageUptodate(filepage);
 	}
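
Taken together, the shmem.c hunks stop asking the allocator for a pre-zeroed page (__GFP_ZERO) and instead zero the page explicitly with clear_highpage() once it is installed, right before flush_dcache_page() and SetPageUptodate(); the mm/page_alloc.c hunk drops the VM_BUG_ON in prep_zero_page() that rejected __GFP_HIGHMEM without __GFP_WAIT. A userspace sketch of that clear-then-publish ordering; struct cached_page, publish_page() and friends are illustrative names only:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct cached_page {
	unsigned char data[PAGE_SIZE];
	atomic_int uptodate;		/* stands in for PageUptodate */
};

static struct cached_page *alloc_cached_page(void)
{
	/* plain allocation, not pre-zeroed (the old code used __GFP_ZERO) */
	return malloc(sizeof(struct cached_page));
}

static void publish_page(struct cached_page *p)
{
	memset(p->data, 0, PAGE_SIZE);	/* stands in for clear_highpage() */
	atomic_store(&p->uptodate, 1);	/* stands in for SetPageUptodate() */
}

int main(void)
{
	struct cached_page *p = alloc_cached_page();

	if (!p)
		return 1;
	publish_page(p);
	free(p);
	return 0;
}
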