Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c              11
-rw-r--r--  kernel/cpuset.c              13
-rw-r--r--  kernel/debug/debug_core.c    16
-rw-r--r--  kernel/debug/kdb/kdb_main.c  48
-rw-r--r--  kernel/sched.c                8
-rw-r--r--  kernel/sched_fair.c          25
-rw-r--r--  kernel/sched_stats.h         20
7 files changed, 50 insertions(+), 91 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5cf366965d0c..66a416b42c18 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1460,9 +1460,9 @@ static int cgroup_get_rootdir(struct super_block *sb)
 	return 0;
 }
 
-static int cgroup_get_sb(struct file_system_type *fs_type,
-			 int flags, const char *unused_dev_name,
-			 void *data, struct vfsmount *mnt)
+static struct dentry *cgroup_mount(struct file_system_type *fs_type,
+			 int flags, const char *unused_dev_name,
+			 void *data)
 {
 	struct cgroup_sb_opts opts;
 	struct cgroupfs_root *root;
@@ -1596,10 +1596,9 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 		drop_parsed_module_refcounts(opts.subsys_bits);
 	}
 
-	simple_set_mnt(mnt, sb);
 	kfree(opts.release_agent);
 	kfree(opts.name);
-	return 0;
+	return dget(sb->s_root);
 
  drop_new_super:
 	deactivate_locked_super(sb);
@@ -1608,7 +1607,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
  out_err:
 	kfree(opts.release_agent);
 	kfree(opts.name);
-	return ret;
+	return ERR_PTR(ret);
 }
 
 static void cgroup_kill_sb(struct super_block *sb) {
@@ -1658,7 +1657,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
 
 static struct file_system_type cgroup_fs_type = {
 	.name = "cgroup",
-	.get_sb = cgroup_get_sb,
+	.mount = cgroup_mount,
 	.kill_sb = cgroup_kill_sb,
 };
 
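The hunks above follow the tree-wide ->get_sb to ->mount conversion: the vfsmount parameter disappears, errors are returned as ERR_PTR() values, and success hands back a referenced root dentry instead of calling simple_set_mnt(). A minimal sketch of the same conversion for a hypothetical "examplefs" (examplefs_fill_super() and the filesystem itself are assumptions for illustration, not part of this diff):

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/dcache.h>

/* assumed helper; a real filesystem supplies its own fill_super */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent);

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
	int err;

	if (IS_ERR(sb))
		return ERR_CAST(sb);

	err = examplefs_fill_super(sb, data, flags & MS_SILENT);
	if (err) {
		deactivate_locked_super(sb);
		return ERR_PTR(err);		/* old API: return err; */
	}
	sb->s_flags |= MS_ACTIVE;
	return dget(sb->s_root);		/* old API: simple_set_mnt(mnt, sb); return 0; */
}

static struct file_system_type examplefs_type = {
	.name	 = "examplefs",
	.mount	 = examplefs_mount,		/* old API: .get_sb = examplefs_get_sb, */
	.kill_sb = kill_anon_super,
};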
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 51b143e2a07a..4349935c2ad8 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -231,18 +231,17 @@ static DEFINE_SPINLOCK(cpuset_buffer_lock);
  * users. If someone tries to mount the "cpuset" filesystem, we
  * silently switch it to mount "cgroup" instead
  */
-static int cpuset_get_sb(struct file_system_type *fs_type,
-			 int flags, const char *unused_dev_name,
-			 void *data, struct vfsmount *mnt)
+static struct dentry *cpuset_mount(struct file_system_type *fs_type,
+			 int flags, const char *unused_dev_name, void *data)
 {
 	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
-	int ret = -ENODEV;
+	struct dentry *ret = ERR_PTR(-ENODEV);
 	if (cgroup_fs) {
 		char mountopts[] =
 			"cpuset,noprefix,"
 			"release_agent=/sbin/cpuset_release_agent";
-		ret = cgroup_fs->get_sb(cgroup_fs, flags,
-					   unused_dev_name, mountopts, mnt);
+		ret = cgroup_fs->mount(cgroup_fs, flags,
+					   unused_dev_name, mountopts);
 		put_filesystem(cgroup_fs);
 	}
 	return ret;
@@ -250,7 +249,7 @@ static int cpuset_get_sb(struct file_system_type *fs_type,
 
 static struct file_system_type cpuset_fs_type = {
 	.name = "cpuset",
-	.get_sb = cpuset_get_sb,
+	.mount = cpuset_mount,
 };
 
 /*
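cpuset_mount() simply forwards to the cgroup filesystem's ->mount and passes the result, dentry or ERR_PTR, straight through. A minimal sketch of how a caller might unpack that return convention (illustrative only; not code from this diff):

#include <linux/fs.h>
#include <linux/err.h>

static int example_do_mount(struct file_system_type *fs_type, int flags,
			    const char *dev_name, void *data)
{
	struct dentry *root = fs_type->mount(fs_type, flags, dev_name, data);

	if (IS_ERR(root))
		return PTR_ERR(root);	/* the error travels inside the pointer */
	/* success: root holds a reference to the filesystem root */
	dput(root);			/* drop it again in this toy example */
	return 0;
}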
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index fec596da9bd0..cefd4a11f6d9 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -209,18 +209,6 @@ int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
 	return 0;
 }
 
-/**
- * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
- * @regs: Current &struct pt_regs.
- *
- * This function will be called if the particular architecture must
- * disable hardware debugging while it is processing gdb packets or
- * handling exception.
- */
-void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
-{
-}
-
 /*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
@@ -484,7 +472,9 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 		atomic_inc(&masters_in_kgdb);
 	else
 		atomic_inc(&slaves_in_kgdb);
-	kgdb_disable_hw_debug(ks->linux_regs);
+
+	if (arch_kgdb_ops.disable_hw_break)
+		arch_kgdb_ops.disable_hw_break(regs);
 
 acquirelock:
 	/*
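The __weak kgdb_disable_hw_debug() stub is replaced by an optional callback in arch_kgdb_ops: architectures that don't need the hook leave the pointer NULL, and the core only calls it when set. A sketch of the arch side under that assumption (the function body and the other initializers are illustrative, not from this diff):

#include <linux/kgdb.h>

static void example_disable_hw_break(struct pt_regs *regs)
{
	/* arch-specific: mask the hardware breakpoint/watchpoint controls */
}

struct kgdb_arch arch_kgdb_ops = {
	/* ... gdb_bpt_instr, flags, etc. as the architecture requires ... */
	.disable_hw_break = example_disable_hw_break,
};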
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index d7bda21a106b..37755d621924 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1127,7 +1127,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
 		/* special case below */
 	} else {
 		kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
-			   kdb_current, kdb_current->pid);
+			   kdb_current, kdb_current ? kdb_current->pid : 0);
 #if defined(CONFIG_SMP)
 		kdb_printf("on processor %d ", raw_smp_processor_id());
 #endif
@@ -2603,20 +2603,17 @@ static int kdb_summary(int argc, const char **argv)
  */
 static int kdb_per_cpu(int argc, const char **argv)
 {
-	char buf[256], fmtstr[64];
-	kdb_symtab_t symtab;
-	cpumask_t suppress = CPU_MASK_NONE;
-	int cpu, diag;
-	unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL;
+	char fmtstr[64];
+	int cpu, diag, nextarg = 1;
+	unsigned long addr, symaddr, val, bytesperword = 0, whichcpu = ~0UL;
 
 	if (argc < 1 || argc > 3)
 		return KDB_ARGCOUNT;
 
-	snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]);
-	if (!kdbgetsymval(buf, &symtab)) {
-		kdb_printf("%s is not a per_cpu variable\n", argv[1]);
-		return KDB_BADADDR;
-	}
+	diag = kdbgetaddrarg(argc, argv, &nextarg, &symaddr, NULL, NULL);
+	if (diag)
+		return diag;
+
 	if (argc >= 2) {
 		diag = kdbgetularg(argv[2], &bytesperword);
 		if (diag)
@@ -2649,46 +2646,25 @@ static int kdb_per_cpu(int argc, const char **argv)
 #define KDB_PCU(cpu) 0
 #endif
 #endif
-
 	for_each_online_cpu(cpu) {
+		if (KDB_FLAG(CMD_INTERRUPT))
+			return 0;
+
 		if (whichcpu != ~0UL && whichcpu != cpu)
 			continue;
-		addr = symtab.sym_start + KDB_PCU(cpu);
+		addr = symaddr + KDB_PCU(cpu);
 		diag = kdb_getword(&val, addr, bytesperword);
 		if (diag) {
 			kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to "
 				   "read, diag=%d\n", cpu, addr, diag);
 			continue;
 		}
-#ifdef CONFIG_SMP
-		if (!val) {
-			cpu_set(cpu, suppress);
-			continue;
-		}
-#endif /* CONFIG_SMP */
 		kdb_printf("%5d ", cpu);
 		kdb_md_line(fmtstr, addr,
 			bytesperword == KDB_WORD_SIZE,
 			1, bytesperword, 1, 1, 0);
 	}
-	if (cpus_weight(suppress) == 0)
-		return 0;
-	kdb_printf("Zero suppressed cpu(s):");
-	for (cpu = first_cpu(suppress); cpu < num_possible_cpus();
-	     cpu = next_cpu(cpu, suppress)) {
-		kdb_printf(" %d", cpu);
-		if (cpu == num_possible_cpus() - 1 ||
-		    next_cpu(cpu, suppress) != cpu + 1)
-			continue;
-		while (cpu < num_possible_cpus() &&
-		       next_cpu(cpu, suppress) == cpu + 1)
-			++cpu;
-		kdb_printf("-%d", cpu);
-	}
-	kdb_printf("\n");
-
 #undef KDB_PCU
-
 	return 0;
 }
 
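With kdbgetaddrarg() resolving the symbol, the loop only has to add each CPU's offset: on SMP configurations KDB_PCU(cpu) reduces to that CPU's per-CPU offset, so a given CPU's copy of the variable lives at the generic address plus that offset. A sketch of that address math (assuming the generic SMP per-CPU layout; the exact KDB_PCU definition is config-dependent):

#include <linux/percpu.h>

/* symaddr is the generic address of the per-CPU variable */
static unsigned long example_percpu_addr(unsigned long symaddr, int cpu)
{
	return symaddr + per_cpu_offset(cpu);	/* what KDB_PCU(cpu) does on SMP */
}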
diff --git a/kernel/sched.c b/kernel/sched.c
index d42992bccdfa..aa14a56f9d03 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8510,12 +8510,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
-	set_task_rq(tsk, task_cpu(tsk));
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (tsk->sched_class->moved_group)
-		tsk->sched_class->moved_group(tsk, on_rq);
+	if (tsk->sched_class->task_move_group)
+		tsk->sched_class->task_move_group(tsk, on_rq);
+	else
 #endif
+		set_task_rq(tsk, task_cpu(tsk));
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
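set_task_rq() now runs inside the class hook when one exists, so the fair class can renormalize vruntime around the runqueue switch (see the sched_fair.c hunk below); only classes without the hook fall back to the bare set_task_rq(). A sketch of the relevant sched_class member after the rename, as this diff uses it:

struct sched_class {
	/* ... */
#ifdef CONFIG_FAIR_GROUP_SCHED
	/* was: void (*moved_group)(struct task_struct *p, int on_rq); */
	void (*task_move_group)(struct task_struct *p, int on_rq);
#endif
};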
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 933f3d1b62ea..f4f6a8326dd0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3869,13 +3869,26 @@ static void set_curr_task_fair(struct rq *rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-
-	update_curr(cfs_rq);
+	/*
+	 * If the task was not on the rq at the time of this cgroup movement
+	 * it must have been asleep, sleeping tasks keep their ->vruntime
+	 * absolute on their old rq until wakeup (needed for the fair sleeper
+	 * bonus in place_entity()).
+	 *
+	 * If it was on the rq, we've just 'preempted' it, which does convert
+	 * ->vruntime to a relative base.
+	 *
+	 * Make sure both cases convert their relative position when migrating
+	 * to another cgroup's rq. This does somewhat interfere with the
+	 * fair sleeper stuff for the first placement, but who cares.
+	 */
+	if (!on_rq)
+		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+	set_task_rq(p, task_cpu(p));
 	if (!on_rq)
-		place_entity(cfs_rq, &p->se, 1);
+		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
 }
 #endif
 
@@ -3927,7 +3940,7 @@ static const struct sched_class fair_sched_class = {
 	.get_rr_interval	= get_rr_interval_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	.moved_group		= moved_group_fair,
+	.task_move_group	= task_move_group_fair,
 #endif
 };
 
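The subtract/re-add around set_task_rq() preserves a sleeping task's position relative to min_vruntime across the cgroup move. A toy user-space illustration with assumed numbers (the values are made up; only the arithmetic mirrors the kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long old_min = 1000, new_min = 5000;
	unsigned long long vruntime = 1200;	/* absolute on the old cfs_rq */

	vruntime -= old_min;	/* relative: 200 ticks of lag survive the move */
	/* ... set_task_rq() switches the task to the new cgroup's cfs_rq ... */
	vruntime += new_min;	/* absolute again on the new cfs_rq */

	printf("new vruntime = %llu\n", vruntime);	/* prints 5200 */
	return 0;
}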
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 25c2f962f6fc..48ddf431db0e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -157,15 +157,7 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
 }
 
 /*
- * Called when a process is dequeued from the active array and given
- * the cpu.  We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue.  (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * Though we are interested in knowing how long it was from the *first* time a
+ * We are interested in knowing how long it was from the *first* time a
  * task was queued to the time that it finally hit a cpu, we call this routine
  * from dequeue_task() to account for possible rq->clock skew across cpus. The
  * delta taken on each cpu would annul the skew.
@@ -203,16 +195,6 @@ static void sched_info_arrive(struct task_struct *t)
 }
 
 /*
- * Called when a process is queued into either the active or expired
- * array.  The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu.  Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either.  It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
  * This function is only called from enqueue_task(), but also only updates
  * the timestamp if it is already not set.  It's assumed that
  * sched_info_dequeued() will clear that stamp when appropriate.