aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ef69124fa3e..22631e0994b3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2283,7 +2283,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.mem_cgroup = mem,
 		.memcg_record = rec,
 	};
-	unsigned long start, end;
+	ktime_t start, end;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 					      sc.may_writepage,
 					      sc.gfp_mask);
 
-	start = sched_clock();
+	start = ktime_get();
 	/*
 	 * NOTE: Although we can get the priority field, using it
 	 * here is not a good idea, since it limits the pages we can scan.
@@ -2301,10 +2301,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 	 * the priority and make it zero.
 	 */
 	shrink_zone(0, zone, &sc);
-	end = sched_clock();
+	end = ktime_get();
 
 	if (rec)
-		rec->elapsed += end - start;
+		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 	*scanned = sc.nr_scanned;
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2319,7 +2319,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
-	unsigned long start, end;
+	ktime_t start, end;
 	int nid;
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
@@ -2337,7 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.gfp_mask = sc.gfp_mask,
 	};
 
-	start = sched_clock();
+	start = ktime_get();
 	/*
 	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
 	 * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 					      sc.gfp_mask);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
-	end = sched_clock();
+	end = ktime_get();
 	if (rec)
-		rec->elapsed += end - start;
+		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 