| author    | Olof Johansson <olof@lixom.net> | 2013-10-28 00:42:44 -0400 |
|-----------|---------------------------------|---------------------------|
| committer | Olof Johansson <olof@lixom.net> | 2013-10-28 00:42:44 -0400 |
| commit    | 3316dee245ef297155fa45b8d14263dfd6a9164b (patch) | |
| tree      | 7adbf9875893ce0405f771d92d266d70df1aedbb /mm/vmscan.c | |
| parent    | f2c4e82e350dab489ae0d8fcd84b780de508ab64 (diff) | |
| parent    | 1fecf8958eb7f90791f2c7e99afac393b64fa976 (diff) | |
Merge tag 's3c24xx-dma' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung into next/drivers
From Kukjin Kim, this branch adds device-tree support to the DMA controller
on the older Samsung SoCs. It also adds support for one of the missing SoCs
in the family, the S3C2410.
The driver has been acked by Vinod Koul, but is merged through here due
to dependencies on platform code.
* tag 's3c24xx-dma' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung:
ARM: S3C24XX: add dma pdata for s3c2410, s3c2440 and s3c2442
dmaengine: s3c24xx-dma: add support for the s3c2410 type of controller
ARM: S3C24XX: Fix possible dma selection warning
ARM: SAMSUNG: set s3c24xx_dma_filter for s3c64xx-spi0 device
ARM: S3C24XX: add platform-devices for new dma driver for s3c2412 and s3c2443
dmaengine: add driver for Samsung s3c24xx SoCs
ARM: S3C24XX: number the dma clocks
+ Linux 3.12-rc3
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 83 |
1 file changed, 31 insertions(+), 52 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ed1b775bdc9..beb35778c69f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -139,23 +139,11 @@ static bool global_reclaim(struct scan_control *sc)
 {
         return !sc->target_mem_cgroup;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-        struct mem_cgroup *root = sc->target_mem_cgroup;
-        return !mem_cgroup_disabled() &&
-                mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
         return true;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-        return false;
-}
 #endif
 
 unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -2176,11 +2164,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
         }
 }
 
-static int
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
         unsigned long nr_reclaimed, nr_scanned;
-        int groups_scanned = 0;
 
         do {
                 struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2174,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
                         .zone = zone,
                         .priority = sc->priority,
                 };
-                struct mem_cgroup *memcg = NULL;
-                mem_cgroup_iter_filter filter = (soft_reclaim) ?
-                        mem_cgroup_soft_reclaim_eligible : NULL;
+                struct mem_cgroup *memcg;
 
                 nr_reclaimed = sc->nr_reclaimed;
                 nr_scanned = sc->nr_scanned;
 
-                while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
+                memcg = mem_cgroup_iter(root, NULL, &reclaim);
+                do {
                         struct lruvec *lruvec;
 
-                        groups_scanned++;
                         lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
                         shrink_lruvec(lruvec, sc);
@@ -2218,7 +2202,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
                                 mem_cgroup_iter_break(root, memcg);
                                 break;
                         }
-                }
+                        memcg = mem_cgroup_iter(root, memcg, &reclaim);
+                } while (memcg);
 
                 vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
                            sc->nr_scanned - nr_scanned,
@@ -2226,37 +2211,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 
         } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
                                          sc->nr_scanned - nr_scanned, sc));
-
-        return groups_scanned;
-}
-
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
-        bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
-        unsigned long nr_scanned = sc->nr_scanned;
-        int scanned_groups;
-
-        scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
-        /*
-         * memcg iterator might race with other reclaimer or start from
-         * a incomplete tree walk so the tree walk in __shrink_zone
-         * might have missed groups that are above the soft limit. Try
-         * another loop to catch up with others. Do it just once to
-         * prevent from reclaim latencies when other reclaimers always
-         * preempt this one.
-         */
-        if (do_soft_reclaim && !scanned_groups)
-                __shrink_zone(zone, sc, do_soft_reclaim);
-
-        /*
-         * No group is over the soft limit or those that are do not have
-         * pages in the zone we are reclaiming so we have to reclaim everybody
-         */
-        if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
-                __shrink_zone(zone, sc, false);
-                return;
-        }
 }
 
 /* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2274,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
         struct zoneref *z;
         struct zone *zone;
+        unsigned long nr_soft_reclaimed;
+        unsigned long nr_soft_scanned;
         bool aborted_reclaim = false;
 
         /*
@@ -2359,6 +2315,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                                         continue;
                                 }
                         }
+                        /*
+                         * This steals pages from memory cgroups over softlimit
+                         * and returns the number of reclaimed pages and
+                         * scanned pages. This works for global memory pressure
+                         * and balancing, not for a memcg's limit.
+                         */
+                        nr_soft_scanned = 0;
+                        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                sc->order, sc->gfp_mask,
+                                                &nr_soft_scanned);
+                        sc->nr_reclaimed += nr_soft_reclaimed;
+                        sc->nr_scanned += nr_soft_scanned;
                         /* need some check for avoid more shrink_zone() */
                 }
 
@@ -2952,6 +2920,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
         int i;
         int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
+        unsigned long nr_soft_reclaimed;
+        unsigned long nr_soft_scanned;
         struct scan_control sc = {
                 .gfp_mask = GFP_KERNEL,
                 .priority = DEF_PRIORITY,
@@ -3066,6 +3036,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
                         sc.nr_scanned = 0;
 
+                        nr_soft_scanned = 0;
+                        /*
+                         * Call soft limit reclaim before calling shrink_zone.
+                         */
+                        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                order, sc.gfp_mask,
+                                                &nr_soft_scanned);
+                        sc.nr_reclaimed += nr_soft_reclaimed;
+
                         /*
                          * There should be no need to raise the scanning
                          * priority if enough pages are already being scanned
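
A note on context: the mm/vmscan.c hunks above did not come from the s3c24xx-dma branch itself; they arrived through the "Linux 3.12-rc3" base that this merge pulls in, and the diffstat is path-limited to mm/vmscan.c. Their net effect is to drop the predicate-driven mem_cgroup_iter_cond() walk and return shrink_zone() to a plain mem_cgroup_iter() do/while walk over the memcg hierarchy, with soft limit reclaim handled up front by mem_cgroup_soft_limit_reclaim(). The following is a minimal standalone sketch, not kernel code, of that do/while shape: mem_cgroup_iter_stub() and the groups[] array are invented stand-ins for the kernel's pre-order hierarchy iterator, so the pattern can be compiled and traced in isolation.

```c
#include <stdio.h>
#include <stddef.h>

/* simplified stand-in for the kernel's struct mem_cgroup */
struct mem_cgroup { const char *name; };

/* a fake hierarchy, flattened into pre-order; always at least one entry */
static struct mem_cgroup groups[] = {
        { "root" }, { "child-a" }, { "child-b" },
};
#define NGROUPS (sizeof(groups) / sizeof(groups[0]))

/* stand-in for mem_cgroup_iter(root, prev, &reclaim): returns the group
 * after 'prev' in the walk, or NULL once the walk is complete */
static struct mem_cgroup *mem_cgroup_iter_stub(struct mem_cgroup *prev)
{
        size_t next = prev ? (size_t)(prev - groups) + 1 : 0;
        return next < NGROUPS ? &groups[next] : NULL;
}

int main(void)
{
        struct mem_cgroup *memcg;

        /* same shape as the restored loop in shrink_zone() */
        memcg = mem_cgroup_iter_stub(NULL);
        do {
                /* the kernel does shrink_lruvec() on this group's lruvec */
                printf("reclaim from %s\n", memcg->name);
                memcg = mem_cgroup_iter_stub(memcg);
        } while (memcg);

        return 0;
}
```

One detail the do { } while (memcg) shape preserves: the body runs at least once. In the kernel of this era, when memcg is disabled the iterator effectively yields NULL and mem_cgroup_zone_lruvec(zone, NULL) falls back to the zone's global lruvec, so reclaim still happens exactly once per pass; the stub above sidesteps that case by always yielding at least one group.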