author | Akinobu Mita <akinobu.mita@gmail.com> | 2007-10-18 06:05:09 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-18 17:37:21 -0400
commit | fbf1e473bd0ecc080a4c37bb89848b16c59ac18b (patch)
tree | 179a7844c0c7722c3fa61d0787845db3c225df4d /mm/slab.c
parent | 6c72ffaab9851e3ee380c77169c0cfcba6b58d4c (diff)
cpu hotplug: slab: cleanup cpuup_callback()
cpuup_callback() is too long. This patch factors the CPU_UP_CANCELED and
CPU_UP_PREPARE handling out of cpuup_callback() into two new helpers,
cpuup_canceled() and cpuup_prepare().
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 303
1 file changed, 160 insertions, 143 deletions
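Before the diff itself, here is a minimal, illustrative sketch of the CPU hotplug notifier shape this patch moves cpuup_callback() toward: a callback that only dispatches on the hotplug action and delegates the real work to small per-action helpers. The example_* names and the empty helper bodies are assumptions made for illustration only; this is not the patched mm/slab.c code.

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical helpers standing in for cpuup_prepare()/cpuup_canceled(). */
static int __cpuinit example_prepare(long cpu)
{
	/* allocate per-CPU state for 'cpu'; return -ENOMEM on failure */
	return 0;
}

static void __cpuinit example_canceled(long cpu)
{
	/* undo whatever example_prepare() managed to set up for 'cpu' */
}

/* The callback only inspects the action and delegates the real work. */
static int __cpuinit example_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = example_prepare(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		example_canceled(cpu);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block __cpuinitdata example_notifier = {
	.notifier_call = example_callback,
};

A notifier block like this is registered once at init time with register_cpu_notifier(); keeping the switch to pure dispatch is what lets the reworked cpuup_callback() below report an allocation failure from the CPU_UP_PREPARE path as NOTIFY_BAD.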
@@ -1156,105 +1156,181 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 }
 #endif
 
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
-				    unsigned long action, void *hcpu)
+static void __cpuinit cpuup_canceled(long cpu)
+{
+	struct kmem_cache *cachep;
+	struct kmem_list3 *l3 = NULL;
+	int node = cpu_to_node(cpu);
+
+	list_for_each_entry(cachep, &cache_chain, next) {
+		struct array_cache *nc;
+		struct array_cache *shared;
+		struct array_cache **alien;
+		cpumask_t mask;
+
+		mask = node_to_cpumask(node);
+		/* cpu is dead; no one can alloc from it. */
+		nc = cachep->array[cpu];
+		cachep->array[cpu] = NULL;
+		l3 = cachep->nodelists[node];
+
+		if (!l3)
+			goto free_array_cache;
+
+		spin_lock_irq(&l3->list_lock);
+
+		/* Free limit for this kmem_list3 */
+		l3->free_limit -= cachep->batchcount;
+		if (nc)
+			free_block(cachep, nc->entry, nc->avail, node);
+
+		if (!cpus_empty(mask)) {
+			spin_unlock_irq(&l3->list_lock);
+			goto free_array_cache;
+		}
+
+		shared = l3->shared;
+		if (shared) {
+			free_block(cachep, shared->entry,
+				   shared->avail, node);
+			l3->shared = NULL;
+		}
+
+		alien = l3->alien;
+		l3->alien = NULL;
+
+		spin_unlock_irq(&l3->list_lock);
+
+		kfree(shared);
+		if (alien) {
+			drain_alien_cache(cachep, alien);
+			free_alien_cache(alien);
+		}
+free_array_cache:
+		kfree(nc);
+	}
+	/*
+	 * In the previous loop, all the objects were freed to
+	 * the respective cache's slabs, now we can go ahead and
+	 * shrink each nodelist to its limit.
+	 */
+	list_for_each_entry(cachep, &cache_chain, next) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+		drain_freelist(cachep, l3, l3->free_objects);
+	}
+}
+
+static int __cpuinit cpuup_prepare(long cpu)
 {
-	long cpu = (long)hcpu;
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
 	const int memsize = sizeof(struct kmem_list3);
 
-	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&cache_chain_mutex);
-		break;
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
+	/*
+	 * We need to do this right in the beginning since
+	 * alloc_arraycache's are going to use this list.
+	 * kmalloc_node allows us to add the slab to the right
+	 * kmem_list3 and not this cpu's kmem_list3
+	 */
+
+	list_for_each_entry(cachep, &cache_chain, next) {
 		/*
-		 * We need to do this right in the beginning since
-		 * alloc_arraycache's are going to use this list.
-		 * kmalloc_node allows us to add the slab to the right
-		 * kmem_list3 and not this cpu's kmem_list3
+		 * Set up the size64 kmemlist for cpu before we can
+		 * begin anything. Make sure some other cpu on this
+		 * node has not already allocated this
 		 */
+		if (!cachep->nodelists[node]) {
+			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
+			if (!l3)
+				goto bad;
+			kmem_list3_init(l3);
+			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
-		list_for_each_entry(cachep, &cache_chain, next) {
 			/*
-			 * Set up the size64 kmemlist for cpu before we can
-			 * begin anything. Make sure some other cpu on this
-			 * node has not already allocated this
+			 * The l3s don't come and go as CPUs come and
+			 * go. cache_chain_mutex is sufficient
+			 * protection here.
 			 */
-			if (!cachep->nodelists[node]) {
-				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
-				if (!l3)
-					goto bad;
-				kmem_list3_init(l3);
-				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-
-				/*
-				 * The l3s don't come and go as CPUs come and
-				 * go. cache_chain_mutex is sufficient
-				 * protection here.
-				 */
-				cachep->nodelists[node] = l3;
-			}
-
-			spin_lock_irq(&cachep->nodelists[node]->list_lock);
-			cachep->nodelists[node]->free_limit =
-				(1 + nr_cpus_node(node)) *
-				cachep->batchcount + cachep->num;
-			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+			cachep->nodelists[node] = l3;
 		}
 
-		/*
-		 * Now we can go ahead with allocating the shared arrays and
-		 * array caches
-		 */
-		list_for_each_entry(cachep, &cache_chain, next) {
-			struct array_cache *nc;
-			struct array_cache *shared = NULL;
-			struct array_cache **alien = NULL;
-
-			nc = alloc_arraycache(node, cachep->limit,
-						cachep->batchcount);
-			if (!nc)
+		spin_lock_irq(&cachep->nodelists[node]->list_lock);
+		cachep->nodelists[node]->free_limit =
+			(1 + nr_cpus_node(node)) *
+			cachep->batchcount + cachep->num;
+		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+	}
+
+	/*
+	 * Now we can go ahead with allocating the shared arrays and
+	 * array caches
+	 */
+	list_for_each_entry(cachep, &cache_chain, next) {
+		struct array_cache *nc;
+		struct array_cache *shared = NULL;
+		struct array_cache **alien = NULL;
+
+		nc = alloc_arraycache(node, cachep->limit,
+					cachep->batchcount);
+		if (!nc)
+			goto bad;
+		if (cachep->shared) {
+			shared = alloc_arraycache(node,
+				cachep->shared * cachep->batchcount,
+				0xbaadf00d);
+			if (!shared)
 				goto bad;
-			if (cachep->shared) {
-				shared = alloc_arraycache(node,
-					cachep->shared * cachep->batchcount,
-					0xbaadf00d);
-				if (!shared)
-					goto bad;
-			}
-			if (use_alien_caches) {
-				alien = alloc_alien_cache(node, cachep->limit);
-				if (!alien)
-					goto bad;
-			}
-			cachep->array[cpu] = nc;
-			l3 = cachep->nodelists[node];
-			BUG_ON(!l3);
+		}
+		if (use_alien_caches) {
+			alien = alloc_alien_cache(node, cachep->limit);
+			if (!alien)
+				goto bad;
+		}
+		cachep->array[cpu] = nc;
+		l3 = cachep->nodelists[node];
+		BUG_ON(!l3);
 
-			spin_lock_irq(&l3->list_lock);
-			if (!l3->shared) {
-				/*
-				 * We are serialised from CPU_DEAD or
-				 * CPU_UP_CANCELLED by the cpucontrol lock
-				 */
-				l3->shared = shared;
-				shared = NULL;
-			}
-#ifdef CONFIG_NUMA
-			if (!l3->alien) {
-				l3->alien = alien;
-				alien = NULL;
-			}
-#endif
-			spin_unlock_irq(&l3->list_lock);
-			kfree(shared);
-			free_alien_cache(alien);
+		spin_lock_irq(&l3->list_lock);
+		if (!l3->shared) {
+			/*
+			 * We are serialised from CPU_DEAD or
+			 * CPU_UP_CANCELLED by the cpucontrol lock
+			 */
+			l3->shared = shared;
+			shared = NULL;
+		}
+#ifdef CONFIG_NUMA
+		if (!l3->alien) {
+			l3->alien = alien;
+			alien = NULL;
 		}
+#endif
+		spin_unlock_irq(&l3->list_lock);
+		kfree(shared);
+		free_alien_cache(alien);
+	}
+	return 0;
+bad:
+	return -ENOMEM;
+}
+
+static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+				    unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+	int err = 0;
+
+	switch (action) {
+	case CPU_LOCK_ACQUIRE:
+		mutex_lock(&cache_chain_mutex);
+		break;
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		err = cpuup_prepare(cpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -1291,72 +1367,13 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		list_for_each_entry(cachep, &cache_chain, next) {
-			struct array_cache *nc;
-			struct array_cache *shared;
-			struct array_cache **alien;
-			cpumask_t mask;
-
-			mask = node_to_cpumask(node);
-			/* cpu is dead; no one can alloc from it. */
-			nc = cachep->array[cpu];
-			cachep->array[cpu] = NULL;
-			l3 = cachep->nodelists[node];
-
-			if (!l3)
-				goto free_array_cache;
-
-			spin_lock_irq(&l3->list_lock);
-
-			/* Free limit for this kmem_list3 */
-			l3->free_limit -= cachep->batchcount;
-			if (nc)
-				free_block(cachep, nc->entry, nc->avail, node);
-
-			if (!cpus_empty(mask)) {
-				spin_unlock_irq(&l3->list_lock);
-				goto free_array_cache;
-			}
-
-			shared = l3->shared;
-			if (shared) {
-				free_block(cachep, shared->entry,
-					   shared->avail, node);
-				l3->shared = NULL;
-			}
-
-			alien = l3->alien;
-			l3->alien = NULL;
-
-			spin_unlock_irq(&l3->list_lock);
-
-			kfree(shared);
-			if (alien) {
-				drain_alien_cache(cachep, alien);
-				free_alien_cache(alien);
-			}
-free_array_cache:
-			kfree(nc);
-		}
-		/*
-		 * In the previous loop, all the objects were freed to
-		 * the respective cache's slabs, now we can go ahead and
-		 * shrink each nodelist to its limit.
-		 */
-		list_for_each_entry(cachep, &cache_chain, next) {
-			l3 = cachep->nodelists[node];
-			if (!l3)
-				continue;
-			drain_freelist(cachep, l3, l3->free_objects);
-		}
+		cpuup_canceled(cpu);
 		break;
 	case CPU_LOCK_RELEASE:
 		mutex_unlock(&cache_chain_mutex);
 		break;
 	}
-	return NOTIFY_OK;
-bad:
-	return NOTIFY_BAD;
+	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
 static struct notifier_block __cpuinitdata cpucache_notifier = {