path: root/kernel/cpuset.c
author     Paul Menage <menage@google.com>  2008-04-29 04:00:26 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-29 11:06:11 -0400
commit     addf2c739d9015d3e9c0500b58a3af051cd58ea7 (patch)
tree       0dec81fe642817c32c8c6b3a77dc72fb41c1a145 /kernel/cpuset.c
parent     9e0c914cabc6d75d2eafdff00671a2ad683a5e3c (diff)
Cpuset hardwall flag: switch cpusets to use the bulk cgroup_add_files() API
Currently the cpusets mem_exclusive flag is overloaded to mean both "no-overlapping" and "no GFP_KERNEL allocations outside this cpuset".

These patches add a new mem_hardwall flag with just the allocation restriction part of the mem_exclusive semantics, without breaking backwards-compatibility for those who continue to use just mem_exclusive. Additionally, the cgroup control file registration for cpusets is cleaned up to reduce boilerplate.

This patch:

This change tidies up the cpusets control file definitions, and reduces the amount of boilerplate required to add/change control files in the future.

Signed-off-by: Paul Menage <menage@google.com>
Reviewed-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
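For readers less familiar with the pattern this patch switches to, the sketch below shows the same table-driven registration idea as a plain user-space C program. The names struct ctl_file, register_file() and register_files() are made up for illustration only; they are not the kernel's cftype/cgroup_add_file()/cgroup_add_files() API, they only mirror its shape: one static array of descriptors plus one bulk-registration call replaces a long chain of per-file calls and error checks, as the diff below does in cpuset_populate().

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for struct cftype and cgroup_add_file();
     * only the shape of the pattern matters here. */
    struct ctl_file {
            const char *name;
            int private;
    };

    static int register_file(const struct ctl_file *f)
    {
            printf("registering control file: %s\n", f->name);
            return 0;       /* pretend registration always succeeds */
    }

    /* Bulk helper in the spirit of cgroup_add_files(): walk the table
     * and stop at the first error. */
    static int register_files(const struct ctl_file *table, size_t count)
    {
            size_t i;

            for (i = 0; i < count; i++) {
                    int err = register_file(&table[i]);
                    if (err)
                            return err;
            }
            return 0;
    }

    /* One table instead of many separately named static structs. */
    static const struct ctl_file files[] = {
            { .name = "cpus",           .private = 1 },
            { .name = "mems",           .private = 2 },
            { .name = "memory_migrate", .private = 3 },
    };

    int main(void)
    {
            /* One call replaces a chain of
             * if ((err = add(...)) < 0) return err; statements. */
            return register_files(files, sizeof(files) / sizeof(files[0]));
    }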
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--  kernel/cpuset.c  166
1 file changed, 75 insertions(+), 91 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b5571272132c..fe5407ca2f1e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1445,53 +1445,76 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
  * for the common functions, 'private' gives the type of file
  */
 
-static struct cftype cft_cpus = {
-        .name = "cpus",
-        .read = cpuset_common_file_read,
-        .write = cpuset_common_file_write,
-        .private = FILE_CPULIST,
-};
-
-static struct cftype cft_mems = {
-        .name = "mems",
-        .read = cpuset_common_file_read,
-        .write = cpuset_common_file_write,
-        .private = FILE_MEMLIST,
-};
-
-static struct cftype cft_cpu_exclusive = {
-        .name = "cpu_exclusive",
-        .read_u64 = cpuset_read_u64,
-        .write_u64 = cpuset_write_u64,
-        .private = FILE_CPU_EXCLUSIVE,
-};
-
-static struct cftype cft_mem_exclusive = {
-        .name = "mem_exclusive",
-        .read_u64 = cpuset_read_u64,
-        .write_u64 = cpuset_write_u64,
-        .private = FILE_MEM_EXCLUSIVE,
-};
-
-static struct cftype cft_sched_load_balance = {
-        .name = "sched_load_balance",
-        .read_u64 = cpuset_read_u64,
-        .write_u64 = cpuset_write_u64,
-        .private = FILE_SCHED_LOAD_BALANCE,
-};
-
-static struct cftype cft_sched_relax_domain_level = {
-        .name = "sched_relax_domain_level",
-        .read = cpuset_common_file_read,
-        .write = cpuset_common_file_write,
-        .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
-};
-
-static struct cftype cft_memory_migrate = {
-        .name = "memory_migrate",
-        .read_u64 = cpuset_read_u64,
-        .write_u64 = cpuset_write_u64,
-        .private = FILE_MEMORY_MIGRATE,
+static struct cftype files[] = {
+        {
+                .name = "cpus",
+                .read = cpuset_common_file_read,
+                .write = cpuset_common_file_write,
+                .private = FILE_CPULIST,
+        },
+
+        {
+                .name = "mems",
+                .read = cpuset_common_file_read,
+                .write = cpuset_common_file_write,
+                .private = FILE_MEMLIST,
+        },
+
+        {
+                .name = "cpu_exclusive",
+                .read_u64 = cpuset_read_u64,
+                .write_u64 = cpuset_write_u64,
+                .private = FILE_CPU_EXCLUSIVE,
+        },
+
+        {
+                .name = "mem_exclusive",
+                .read_u64 = cpuset_read_u64,
+                .write_u64 = cpuset_write_u64,
+                .private = FILE_MEM_EXCLUSIVE,
+        },
+
+        {
+                .name = "sched_load_balance",
+                .read_u64 = cpuset_read_u64,
+                .write_u64 = cpuset_write_u64,
+                .private = FILE_SCHED_LOAD_BALANCE,
+        },
+
+        {
+                .name = "sched_relax_domain_level",
+                .read_u64 = cpuset_read_u64,
+                .write_u64 = cpuset_write_u64,
+                .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+        },
+
+        {
+                .name = "memory_migrate",
+                .read_u64 = cpuset_read_u64,
+                .write_u64 = cpuset_write_u64,
+                .private = FILE_MEMORY_MIGRATE,
+        },
+
+        {
+                .name = "memory_pressure",
+                .read_u64 = cpuset_read_u64,
+                .write_u64 = cpuset_write_u64,
+                .private = FILE_MEMORY_PRESSURE,
+        },
+
+        {
+                .name = "memory_spread_page",
+                .read_u64 = cpuset_read_u64,
+                .write_u64 = cpuset_write_u64,
+                .private = FILE_SPREAD_PAGE,
+        },
+
+        {
+                .name = "memory_spread_slab",
+                .read_u64 = cpuset_read_u64,
+                .write_u64 = cpuset_write_u64,
+                .private = FILE_SPREAD_SLAB,
+        },
 };
 
 static struct cftype cft_memory_pressure_enabled = {
@@ -1501,57 +1524,18 @@ static struct cftype cft_memory_pressure_enabled = {
         .private = FILE_MEMORY_PRESSURE_ENABLED,
 };
 
-static struct cftype cft_memory_pressure = {
-        .name = "memory_pressure",
-        .read_u64 = cpuset_read_u64,
-        .write_u64 = cpuset_write_u64,
-        .private = FILE_MEMORY_PRESSURE,
-};
-
-static struct cftype cft_spread_page = {
-        .name = "memory_spread_page",
-        .read_u64 = cpuset_read_u64,
-        .write_u64 = cpuset_write_u64,
-        .private = FILE_SPREAD_PAGE,
-};
-
-static struct cftype cft_spread_slab = {
-        .name = "memory_spread_slab",
-        .read_u64 = cpuset_read_u64,
-        .write_u64 = cpuset_write_u64,
-        .private = FILE_SPREAD_SLAB,
-};
-
 static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 {
         int err;
 
-        if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss,
-                                &cft_sched_relax_domain_level)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
-                return err;
-        if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
+        err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+        if (err)
                 return err;
         /* memory_pressure_enabled is in root cpuset only */
-        if (err == 0 && !cont->parent)
+        if (!cont->parent)
                 err = cgroup_add_file(cont, ss,
                                         &cft_memory_pressure_enabled);
-        return 0;
+        return err;
 }
 
 /*