Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/base.c       109
-rw-r--r--  fs/proc/internal.h     4
-rw-r--r--  fs/proc/stat.c        14
-rw-r--r--  fs/proc/task_mmu.c    53
4 files changed, 173 insertions, 7 deletions
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 144a96732dd..3c231adf845 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -873,6 +873,113 @@ static const struct file_operations proc_environ_operations = {
         .release        = mem_release,
 };
 
+static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
+                            loff_t *ppos)
+{
+        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+        char buffer[PROC_NUMBUF];
+        int oom_adj = OOM_ADJUST_MIN;
+        size_t len;
+        unsigned long flags;
+
+        if (!task)
+                return -ESRCH;
+        if (lock_task_sighand(task, &flags)) {
+                if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
+                        oom_adj = OOM_ADJUST_MAX;
+                else
+                        oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
+                                  OOM_SCORE_ADJ_MAX;
+                unlock_task_sighand(task, &flags);
+        }
+        put_task_struct(task);
+        len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
+        return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t oom_adj_write(struct file *file, const char __user *buf,
+                             size_t count, loff_t *ppos)
+{
+        struct task_struct *task;
+        char buffer[PROC_NUMBUF];
+        int oom_adj;
+        unsigned long flags;
+        int err;
+
+        memset(buffer, 0, sizeof(buffer));
+        if (count > sizeof(buffer) - 1)
+                count = sizeof(buffer) - 1;
+        if (copy_from_user(buffer, buf, count)) {
+                err = -EFAULT;
+                goto out;
+        }
+
+        err = kstrtoint(strstrip(buffer), 0, &oom_adj);
+        if (err)
+                goto out;
+        if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
+             oom_adj != OOM_DISABLE) {
+                err = -EINVAL;
+                goto out;
+        }
+
+        task = get_proc_task(file->f_path.dentry->d_inode);
+        if (!task) {
+                err = -ESRCH;
+                goto out;
+        }
+
+        task_lock(task);
+        if (!task->mm) {
+                err = -EINVAL;
+                goto err_task_lock;
+        }
+
+        if (!lock_task_sighand(task, &flags)) {
+                err = -ESRCH;
+                goto err_task_lock;
+        }
+
+        /*
+         * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
+         * value is always attainable.
+         */
+        if (oom_adj == OOM_ADJUST_MAX)
+                oom_adj = OOM_SCORE_ADJ_MAX;
+        else
+                oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+
+        if (oom_adj < task->signal->oom_score_adj &&
+            !capable(CAP_SYS_RESOURCE)) {
+                err = -EACCES;
+                goto err_sighand;
+        }
+
+        /*
+         * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
+         * /proc/pid/oom_score_adj instead.
+         */
+        printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+                    current->comm, task_pid_nr(current), task_pid_nr(task),
+                    task_pid_nr(task));
+
+        task->signal->oom_score_adj = oom_adj;
+        trace_oom_score_adj_update(task);
+err_sighand:
+        unlock_task_sighand(task, &flags);
+err_task_lock:
+        task_unlock(task);
+        put_task_struct(task);
+out:
+        return err < 0 ? err : count;
+}
+
+static const struct file_operations proc_oom_adj_operations = {
+        .read           = oom_adj_read,
+        .write          = oom_adj_write,
+        .llseek         = generic_file_llseek,
+};
+
 static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
                                   size_t count, loff_t *ppos)
 {
@@ -2598,6 +2705,7 @@ static const struct pid_entry tgid_base_stuff[] = {
         REG("cgroup", S_IRUGO, proc_cgroup_operations),
 #endif
         INF("oom_score", S_IRUGO, proc_oom_score),
+        REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
         REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
         REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
@@ -2964,6 +3072,7 @@ static const struct pid_entry tid_base_stuff[] = {
         REG("cgroup", S_IRUGO, proc_cgroup_operations),
 #endif
         INF("oom_score", S_IRUGO, proc_oom_score),
+        REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
         REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
         REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
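
The legacy /proc/pid/oom_adj file re-added above stores no value of its own: writes rescale the [-17, 15] oom_adj range onto the [-1000, 1000] oom_score_adj range via oom_adj * OOM_SCORE_ADJ_MAX / -OOM_DISABLE (with OOM_ADJUST_MAX pinned to OOM_SCORE_ADJ_MAX so the maximum stays attainable), and reads apply the inverse mapping. The standalone userspace sketch below walks that arithmetic; the constants mirror the kernel's oom.h values, the helper names are invented for illustration only, and the round trip is not exact for intermediate values because of integer truncation.

/*
 * Userspace sketch of the oom_adj <-> oom_score_adj scaling used above.
 * Constants mirror the kernel's oom.h; the helpers are illustrative only.
 */
#include <stdio.h>

#define OOM_DISABLE        (-17)
#define OOM_ADJUST_MIN     (-16)
#define OOM_ADJUST_MAX     15
#define OOM_SCORE_ADJ_MIN  (-1000)
#define OOM_SCORE_ADJ_MAX  1000

/* Write path: legacy oom_adj value -> oom_score_adj kept by the kernel. */
static int oom_adj_to_score_adj(int oom_adj)
{
        if (oom_adj == OOM_ADJUST_MAX)
                return OOM_SCORE_ADJ_MAX;   /* keep the maximum attainable */
        return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

/* Read path: stored oom_score_adj -> value shown in /proc/pid/oom_adj. */
static int score_adj_to_oom_adj(int score_adj)
{
        if (score_adj == OOM_SCORE_ADJ_MAX)
                return OOM_ADJUST_MAX;
        return (score_adj * -OOM_DISABLE) / OOM_SCORE_ADJ_MAX;
}

int main(void)
{
        int v;

        for (v = OOM_DISABLE; v <= OOM_ADJUST_MAX; v++)
                printf("oom_adj %4d -> oom_score_adj %5d -> oom_adj %4d\n",
                       v, oom_adj_to_score_adj(v),
                       score_adj_to_oom_adj(oom_adj_to_score_adj(v)));
        return 0;
}
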
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index cceaab07ad5..43973b084ab 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/proc_fs.h>
 struct ctl_table_header;
+struct mempolicy;
 
 extern struct proc_dir_entry proc_root;
 #ifdef CONFIG_PROC_SYSCTL
@@ -74,6 +75,9 @@ struct proc_maps_private {
 #ifdef CONFIG_MMU
         struct vm_area_struct *tail_vma;
 #endif
+#ifdef CONFIG_NUMA
+        struct mempolicy *task_mempolicy;
+#endif
 };
 
 void proc_init_inodecache(void);
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 64c3b317236..e296572c73e 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -45,10 +45,13 @@ static cputime64_t get_iowait_time(int cpu)
 
 static u64 get_idle_time(int cpu)
 {
-        u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
+        u64 idle, idle_time = -1ULL;
+
+        if (cpu_online(cpu))
+                idle_time = get_cpu_idle_time_us(cpu, NULL);
 
         if (idle_time == -1ULL)
-                /* !NO_HZ so we can rely on cpustat.idle */
+                /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
                 idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
         else
                 idle = usecs_to_cputime64(idle_time);
@@ -58,10 +61,13 @@ static u64 get_idle_time(int cpu)
 
 static u64 get_iowait_time(int cpu)
 {
-        u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
+        u64 iowait, iowait_time = -1ULL;
+
+        if (cpu_online(cpu))
+                iowait_time = get_cpu_iowait_time_us(cpu, NULL);
 
         if (iowait_time == -1ULL)
-                /* !NO_HZ so we can rely on cpustat.iowait */
+                /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
                 iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
         else
                 iowait = usecs_to_cputime64(iowait_time);
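
With the two hunks above, /proc/stat falls back to the tick-based cpustat counters for CPUs that are not online instead of querying the NO_HZ idle-time accounting for them. On the consumer side these values appear as the idle (5th) and iowait (6th) fields of each cpuN line of /proc/stat, in USER_HZ ticks. The userspace sketch below reads just those two columns; it assumes only the conventional /proc/stat layout and nothing from this patch.

/* Minimal /proc/stat reader: prints the idle and iowait columns per CPU. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];
        FILE *fp = fopen("/proc/stat", "r");

        if (!fp) {
                perror("/proc/stat");
                return 1;
        }
        while (fgets(line, sizeof(line), fp)) {
                unsigned long long user, nice, system, idle, iowait;
                int cpu;

                /* Skip the aggregate "cpu " line; keep only "cpu0", "cpu1", ... */
                if (strncmp(line, "cpu", 3) != 0 || line[3] < '0' || line[3] > '9')
                        continue;
                if (sscanf(line + 3, "%d %llu %llu %llu %llu %llu",
                           &cpu, &user, &nice, &system, &idle, &iowait) == 6)
                        printf("cpu%-3d idle=%llu iowait=%llu\n",
                               cpu, idle, iowait);
        }
        fclose(fp);
        return 0;
}
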
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 79827ce03e3..90c63f9392a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -90,10 +90,55 @@ static void pad_len_spaces(struct seq_file *m, int len)
         seq_printf(m, "%*c", len, ' ');
 }
 
+#ifdef CONFIG_NUMA
+/*
+ * These functions are for numa_maps but called in generic **maps seq_file
+ * ->start(), ->stop() ops.
+ *
+ * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
+ * Each mempolicy object is controlled by reference counting. The problem here
+ * is how to avoid accessing a dead mempolicy object.
+ *
+ * Because we're holding mmap_sem while reading the seq_file, it's safe to
+ * access each vma's mempolicy: no vma will drop its reference to one.
+ *
+ * A task's mempolicy (task->mempolicy) behaves differently: task->mempolicy
+ * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
+ * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
+ * guarantee the task never exits under us. But taking task_lock() around
+ * get_vma_policy() causes a lock-order problem.
+ *
+ * To access task->mempolicy without a lock, we take a reference on the object
+ * pointed to by task->mempolicy and remember it. This guarantees that
+ * task->mempolicy points to an alive object or NULL in numa_maps accesses.
+ */
+static void hold_task_mempolicy(struct proc_maps_private *priv)
+{
+        struct task_struct *task = priv->task;
+
+        task_lock(task);
+        priv->task_mempolicy = task->mempolicy;
+        mpol_get(priv->task_mempolicy);
+        task_unlock(task);
+}
+static void release_task_mempolicy(struct proc_maps_private *priv)
+{
+        mpol_put(priv->task_mempolicy);
+}
+#else
+static void hold_task_mempolicy(struct proc_maps_private *priv)
+{
+}
+static void release_task_mempolicy(struct proc_maps_private *priv)
+{
+}
+#endif
+
 static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
 {
         if (vma && vma != priv->tail_vma) {
                 struct mm_struct *mm = vma->vm_mm;
+                release_task_mempolicy(priv);
                 up_read(&mm->mmap_sem);
                 mmput(mm);
         }
@@ -132,7 +177,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 
         tail_vma = get_gate_vma(priv->task->mm);
         priv->tail_vma = tail_vma;
-
+        hold_task_mempolicy(priv);
         /* Start with last addr hint */
         vma = find_vma(mm, last_addr);
         if (last_addr && vma) {
@@ -159,6 +204,7 @@ out:
         if (vma)
                 return vma;
 
+        release_task_mempolicy(priv);
         /* End of vmas has been reached */
         m->version = (tail_vma != NULL)? 0: -1UL;
         up_read(&mm->mmap_sem);
@@ -1158,6 +1204,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
         struct vm_area_struct *vma = v;
         struct numa_maps *md = &numa_priv->md;
         struct file *file = vma->vm_file;
+        struct task_struct *task = proc_priv->task;
         struct mm_struct *mm = vma->vm_mm;
         struct mm_walk walk = {};
         struct mempolicy *pol;
@@ -1177,7 +1224,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
         walk.private = md;
         walk.mm = mm;
 
-        pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
+        pol = get_vma_policy(task, vma, vma->vm_start);
         mpol_to_str(buffer, sizeof(buffer), pol, 0);
         mpol_cond_put(pol);
 
@@ -1189,7 +1236,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
         } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                 seq_printf(m, " heap");
         } else {
-                pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid);
+                pid_t tid = vm_is_stack(task, vma, is_pid);
                 if (tid != 0) {
                         /*
                          * Thread stack in /proc/PID/task/TID/maps or
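
The reader being protected by the reference taken in hold_task_mempolicy() is an ordinary open/read of /proc/<pid>/numa_maps, which walks every VMA of the target under mmap_sem while the target task may be exiting concurrently. For completeness, a trivial userspace reader is sketched below; it assumes only that the file exists (CONFIG_NUMA kernels) and that each line begins with a mapping address followed by its policy.

/* Trivial reader for /proc/<pid>/numa_maps ("self" when no pid is given). */
#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64], line[1024];
        FILE *fp;

        snprintf(path, sizeof(path), "/proc/%s/numa_maps",
                 argc > 1 ? argv[1] : "self");
        fp = fopen(path, "r");
        if (!fp) {
                perror(path);
                return 1;
        }
        /* Each line: start address, policy (e.g. "default"), per-node counts. */
        while (fgets(line, sizeof(line), fp))
                fputs(line, stdout);
        fclose(fp);
        return 0;
}
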