author		Shaohua Li <shaohua.li@intel.com>	2011-08-25 18:59:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-25 19:25:34 -0400
commit		f51bdd2e97098a5cbb3cba7c3a56fa0e9ac3c444
tree		90419a74be50f313f7fefe99aa7776f8ba832233 /mm
parent		7e8aa048989bf7e0604996a3e2068fb1a81f81bd
mm: fix a vmscan warning
I get the following warning:
BUG: using smp_processor_id() in preemptible [00000000] code: bash/746
caller is native_sched_clock+0x37/0x6e
Pid: 746, comm: bash Tainted: G W 3.0.0+ #254
Call Trace:
[<ffffffff813435c6>] debug_smp_processor_id+0xc2/0xdc
[<ffffffff8104158d>] native_sched_clock+0x37/0x6e
[<ffffffff81116219>] try_to_free_mem_cgroup_pages+0x7d/0x270
[<ffffffff8114f1f8>] mem_cgroup_force_empty+0x24b/0x27a
[<ffffffff8114ff21>] ? sys_close+0x38/0x138
[<ffffffff8114ff21>] ? sys_close+0x38/0x138
[<ffffffff8114f257>] mem_cgroup_force_empty_write+0x17/0x19
[<ffffffff810c72fb>] cgroup_file_write+0xa8/0xba
[<ffffffff811522d2>] vfs_write+0xb3/0x138
[<ffffffff8115241a>] sys_write+0x4a/0x71
[<ffffffff8114ffd9>] ? sys_close+0xf0/0x138
[<ffffffff8176deab>] system_call_fastpath+0x16/0x1b
sched_clock() can't be used with preemption enabled. We don't need a
fast approach to get the clock here, so let's use the ktime API instead
(a sketch of the resulting pattern follows the sign-offs below).
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
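For readers unfamiliar with the API, the pattern the patch adopts is sketched below. This is a minimal illustration in kernel context, not code from the patch: the helper name measure_elapsed_ns and the reclaim_fn callback are hypothetical, chosen only to show how the ktime API times a stretch of preemptible code.

	#include <linux/ktime.h>

	/*
	 * Hypothetical helper illustrating the pattern this patch adopts:
	 * ktime_get() reads the monotonic clock and may be called with
	 * preemption enabled, whereas sched_clock() is a per-CPU fast path
	 * that trips debug_smp_processor_id() in preemptible code.
	 */
	static u64 measure_elapsed_ns(void (*reclaim_fn)(void))
	{
		ktime_t start, end;

		start = ktime_get();
		reclaim_fn();		/* timed work; may sleep or migrate CPUs */
		end = ktime_get();

		/* ktime_sub() yields a ktime_t delta; convert it to nanoseconds */
		return ktime_to_ns(ktime_sub(end, start));
	}

Disabling preemption around the measurement would also silence the warning, but it is not viable here: the timed reclaim path can sleep, and an accounting counter like rec->elapsed does not need sched_clock()'s fast per-CPU precision anyway.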
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	16	++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ef69124fa3e..22631e0994b3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2283,7 +2283,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.mem_cgroup = mem,
 		.memcg_record = rec,
 	};
-	unsigned long start, end;
+	ktime_t start, end;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 						sc.may_writepage,
 						sc.gfp_mask);
 
-	start = sched_clock();
+	start = ktime_get();
 	/*
 	 * NOTE: Although we can get the priority field, using it
 	 * here is not a good idea, since it limits the pages we can scan.
@@ -2301,10 +2301,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 	 * the priority and make it zero.
 	 */
 	shrink_zone(0, zone, &sc);
-	end = sched_clock();
+	end = ktime_get();
 
 	if (rec)
-		rec->elapsed += end - start;
+		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 	*scanned = sc.nr_scanned;
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2319,7 +2319,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
-	unsigned long start, end;
+	ktime_t start, end;
 	int nid;
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
@@ -2337,7 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.gfp_mask = sc.gfp_mask,
 	};
 
-	start = sched_clock();
+	start = ktime_get();
 	/*
 	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
 	 * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 						sc.gfp_mask);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
-	end = sched_clock();
+	end = ktime_get();
 	if (rec)
-		rec->elapsed += end - start;
+		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 