diff options
author | Tejun Heo <tj@kernel.org> | 2013-08-08 20:11:22 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2013-08-08 20:11:22 -0400 |
commit | c9710d8018273b0740e0794858f1961fcea5e61a (patch) | |
tree | 889735f6c439e4cb49a7f0d47514522406f3d924 /kernel/cpuset.c | |
parent | 8af01f56a03e9cbd91a55d688fce1315021efba8 (diff) |
cpuset: drop "const" qualifiers from struct cpuset instances
cpuset uses "const" qualifiers on struct cpuset in some functions;
however, it doesn't work well when a value derived from a returned const
pointer has to be passed to an accessor. It's C, after all.
Drop the "const" qualifiers except for the trivially leaf ones. This
patch doesn't make any functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r-- | kernel/cpuset.c | 17 |
1 file changed, 8 insertions, 9 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index be4512ba2c0c..f7371341d42a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -128,7 +128,7 @@ static inline struct cpuset *task_cs(struct task_struct *task) | |||
128 | struct cpuset, css); | 128 | struct cpuset, css); |
129 | } | 129 | } |
130 | 130 | ||
131 | static inline struct cpuset *parent_cs(const struct cpuset *cs) | 131 | static inline struct cpuset *parent_cs(struct cpuset *cs) |
132 | { | 132 | { |
133 | struct cgroup *pcgrp = cs->css.cgroup->parent; | 133 | struct cgroup *pcgrp = cs->css.cgroup->parent; |
134 | 134 | ||
@@ -319,8 +319,7 @@ static struct file_system_type cpuset_fs_type = { | |||
319 | * | 319 | * |
320 | * Call with callback_mutex held. | 320 | * Call with callback_mutex held. |
321 | */ | 321 | */ |
322 | static void guarantee_online_cpus(const struct cpuset *cs, | 322 | static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) |
323 | struct cpumask *pmask) | ||
324 | { | 323 | { |
325 | while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask)) | 324 | while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask)) |
326 | cs = parent_cs(cs); | 325 | cs = parent_cs(cs); |
@@ -338,7 +337,7 @@ static void guarantee_online_cpus(const struct cpuset *cs, | |||
338 | * | 337 | * |
339 | * Call with callback_mutex held. | 338 | * Call with callback_mutex held. |
340 | */ | 339 | */ |
341 | static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) | 340 | static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) |
342 | { | 341 | { |
343 | while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY])) | 342 | while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY])) |
344 | cs = parent_cs(cs); | 343 | cs = parent_cs(cs); |
@@ -383,7 +382,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) | |||
383 | * alloc_trial_cpuset - allocate a trial cpuset | 382 | * alloc_trial_cpuset - allocate a trial cpuset |
384 | * @cs: the cpuset that the trial cpuset duplicates | 383 | * @cs: the cpuset that the trial cpuset duplicates |
385 | */ | 384 | */ |
386 | static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs) | 385 | static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) |
387 | { | 386 | { |
388 | struct cpuset *trial; | 387 | struct cpuset *trial; |
389 | 388 | ||
@@ -430,7 +429,7 @@ static void free_trial_cpuset(struct cpuset *trial) | |||
430 | * Return 0 if valid, -errno if not. | 429 | * Return 0 if valid, -errno if not. |
431 | */ | 430 | */ |
432 | 431 | ||
433 | static int validate_change(const struct cpuset *cur, const struct cpuset *trial) | 432 | static int validate_change(struct cpuset *cur, struct cpuset *trial) |
434 | { | 433 | { |
435 | struct cgroup *cgrp; | 434 | struct cgroup *cgrp; |
436 | struct cpuset *c, *par; | 435 | struct cpuset *c, *par; |
@@ -2343,7 +2342,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) | |||
2343 | 2342 | ||
2344 | void cpuset_cpus_allowed_fallback(struct task_struct *tsk) | 2343 | void cpuset_cpus_allowed_fallback(struct task_struct *tsk) |
2345 | { | 2344 | { |
2346 | const struct cpuset *cpus_cs; | 2345 | struct cpuset *cpus_cs; |
2347 | 2346 | ||
2348 | rcu_read_lock(); | 2347 | rcu_read_lock(); |
2349 | cpus_cs = effective_cpumask_cpuset(task_cs(tsk)); | 2348 | cpus_cs = effective_cpumask_cpuset(task_cs(tsk)); |
@@ -2416,7 +2415,7 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) | |||
2416 | * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall | 2415 | * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall |
2417 | * (an unusual configuration), then returns the root cpuset. | 2416 | * (an unusual configuration), then returns the root cpuset. |
2418 | */ | 2417 | */ |
2419 | static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs) | 2418 | static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) |
2420 | { | 2419 | { |
2421 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) | 2420 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) |
2422 | cs = parent_cs(cs); | 2421 | cs = parent_cs(cs); |
@@ -2486,7 +2485,7 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs) | |||
2486 | */ | 2485 | */ |
2487 | int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | 2486 | int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) |
2488 | { | 2487 | { |
2489 | const struct cpuset *cs; /* current cpuset ancestors */ | 2488 | struct cpuset *cs; /* current cpuset ancestors */ |
2490 | int allowed; /* is allocation in zone z allowed? */ | 2489 | int allowed; /* is allocation in zone z allowed? */ |
2491 | 2490 | ||
2492 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) | 2491 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) |