author     Jörg Sommer <joerg@alea.gnuu.de>                2011-06-15 15:59:45 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-06-16 00:52:50 -0400
commit     f6e07d38078e82a6aeaae00bb134591ef5ac1167 (patch)
tree       9ebadc1c483a5d96b3d33141a229397c4f594df5
parent     06a2c45d6b4a7586eba7cd20dd656b08d8b63c2f (diff)
Documentation: update cgroupfs mount point
According to commit 676db4af0430 ("cgroupfs: create /sys/fs/cgroup to
mount cgroupfs on") the canonical mountpoint for the cgroup filesystem
is /sys/fs/cgroup. Hence, this should be used in the documentation.

Signed-off-by: Jörg Sommer <joerg@alea.gnuu.de>
Acked-by: Paul Menage <menage@google.com>
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
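
The convention the updated documents describe is to mount a tmpfs at
/sys/fs/cgroup and then mount each controller hierarchy in its own
subdirectory there. A minimal sketch of that layout, using the blkio
controller from the examples below (other controllers follow the same
pattern):

  # mount -t tmpfs cgroup_root /sys/fs/cgroup
  # mkdir /sys/fs/cgroup/blkio
  # mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
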
-rw-r--r--  Documentation/accounting/cgroupstats.txt      |  4
-rw-r--r--  Documentation/cgroups/blkio-controller.txt    | 29
-rw-r--r--  Documentation/cgroups/cgroups.txt             | 58
-rw-r--r--  Documentation/cgroups/cpuacct.txt             | 21
-rw-r--r--  Documentation/cgroups/cpusets.txt             | 28
-rw-r--r--  Documentation/cgroups/devices.txt             |  6
-rw-r--r--  Documentation/cgroups/freezer-subsystem.txt   | 20
-rw-r--r--  Documentation/cgroups/memory.txt              | 17
-rw-r--r--  Documentation/scheduler/sched-design-CFS.txt  |  7
-rw-r--r--  Documentation/scheduler/sched-rt-group.txt    |  7
-rw-r--r--  Documentation/vm/hwpoison.txt                 |  6
11 files changed, 109 insertions(+), 94 deletions(-)
diff --git a/Documentation/accounting/cgroupstats.txt b/Documentation/accounting/cgroupstats.txt
index eda40fd39cad..d16a9849e60e 100644
--- a/Documentation/accounting/cgroupstats.txt
+++ b/Documentation/accounting/cgroupstats.txt
@@ -21,7 +21,7 @@ information will not be available.
 To extract cgroup statistics a utility very similar to getdelays.c
 has been developed, the sample output of the utility is shown below
 
-~/balbir/cgroupstats # ./getdelays -C "/cgroup/a"
+~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup/a"
 sleeping 1, blocked 0, running 1, stopped 0, uninterruptible 0
-~/balbir/cgroupstats # ./getdelays -C "/cgroup"
+~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup"
 sleeping 155, blocked 0, running 1, stopped 0, uninterruptible 2
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 465351d4cf85..b1b1bfadc9c0 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -28,16 +28,19 @@ cgroups. Here is what you can do.
 - Enable group scheduling in CFQ
   CONFIG_CFQ_GROUP_IOSCHED=y
 
-- Compile and boot into kernel and mount IO controller (blkio).
+- Compile and boot into kernel and mount IO controller (blkio); see
+  cgroups.txt, Why are cgroups needed?.
 
-  mount -t cgroup -o blkio none /cgroup
+  mount -t tmpfs cgroup_root /sys/fs/cgroup
+  mkdir /sys/fs/cgroup/blkio
+  mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Create two cgroups
-  mkdir -p /cgroup/test1/ /cgroup/test2
+  mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
 
 - Set weights of group test1 and test2
-  echo 1000 > /cgroup/test1/blkio.weight
-  echo 500 > /cgroup/test2/blkio.weight
+  echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
+  echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
 
 - Create two same size files (say 512MB each) on same disk (file1, file2) and
   launch two dd threads in different cgroup to read those files.
@@ -46,12 +49,12 @@ cgroups. Here is what you can do.
   echo 3 > /proc/sys/vm/drop_caches
 
   dd if=/mnt/sdb/zerofile1 of=/dev/null &
-  echo $! > /cgroup/test1/tasks
-  cat /cgroup/test1/tasks
+  echo $! > /sys/fs/cgroup/blkio/test1/tasks
+  cat /sys/fs/cgroup/blkio/test1/tasks
 
   dd if=/mnt/sdb/zerofile2 of=/dev/null &
-  echo $! > /cgroup/test2/tasks
-  cat /cgroup/test2/tasks
+  echo $! > /sys/fs/cgroup/blkio/test2/tasks
+  cat /sys/fs/cgroup/blkio/test2/tasks
 
 - At macro level, first dd should finish first. To get more precise data, keep
   on looking at (with the help of script), at blkio.disk_time and
@@ -68,13 +71,13 @@ Throttling/Upper Limit policy
 - Enable throttling in block layer
   CONFIG_BLK_DEV_THROTTLING=y
 
-- Mount blkio controller
-  mount -t cgroup -o blkio none /cgroup/blkio
+- Mount blkio controller (see cgroups.txt, Why are cgroups needed?)
+  mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Specify a bandwidth rate on particular device for root group. The format
   for policy is "<major>:<minor> <byes_per_second>".
 
-  echo "8:16 1048576" > /cgroup/blkio/blkio.read_bps_device
+  echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device
 
   Above will put a limit of 1MB/second on reads happening for root group
   on device having major/minor number 8:16.
@@ -149,7 +152,7 @@ Proportional weight policy files
 
 Following is the format.
 
-#echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device
+# echo dev_maj:dev_minor weight > blkio.weight_device
 Configure weight=300 on /dev/sdb (8:16) in this cgroup
 # echo 8:16 300 > blkio.weight_device
 # cat blkio.weight_device
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 0ed99f08f1f3..15bca101ff62 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -138,7 +138,7 @@ With the ability to classify tasks differently for different resources
 the admin can easily set up a script which receives exec notifications
 and depending on who is launching the browser he can
 
-    # echo browser_pid > /mnt/<restype>/<userclass>/tasks
+    # echo browser_pid > /sys/fs/cgroup/<restype>/<userclass>/tasks
 
 With only a single hierarchy, he now would potentially have to create
 a separate cgroup for every browser launched and associate it with
@@ -153,9 +153,9 @@ apps enhanced CPU power,
 With ability to write pids directly to resource classes, it's just a
 matter of :
 
-    # echo pid > /mnt/network/<new_class>/tasks
+    # echo pid > /sys/fs/cgroup/network/<new_class>/tasks
     (after some time)
-    # echo pid > /mnt/network/<orig_class>/tasks
+    # echo pid > /sys/fs/cgroup/network/<orig_class>/tasks
 
 Without this ability, he would have to split the cgroup into
 multiple separate ones and then associate the new cgroups with the
@@ -310,21 +310,24 @@ subsystem, this is the case for the cpuset.
 To start a new job that is to be contained within a cgroup, using
 the "cpuset" cgroup subsystem, the steps are something like:
 
- 1) mkdir /dev/cgroup
- 2) mount -t cgroup -ocpuset cpuset /dev/cgroup
- 3) Create the new cgroup by doing mkdir's and write's (or echo's) in
-    the /dev/cgroup virtual file system.
- 4) Start a task that will be the "founding father" of the new job.
- 5) Attach that task to the new cgroup by writing its pid to the
-    /dev/cgroup tasks file for that cgroup.
- 6) fork, exec or clone the job tasks from this founding father task.
+ 1) mount -t tmpfs cgroup_root /sys/fs/cgroup
+ 2) mkdir /sys/fs/cgroup/cpuset
+ 3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
+ 4) Create the new cgroup by doing mkdir's and write's (or echo's) in
+    the /sys/fs/cgroup virtual file system.
+ 5) Start a task that will be the "founding father" of the new job.
+ 6) Attach that task to the new cgroup by writing its pid to the
+    /sys/fs/cgroup/cpuset/tasks file for that cgroup.
+ 7) fork, exec or clone the job tasks from this founding father task.
 
 For example, the following sequence of commands will setup a cgroup
 named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
 and then start a subshell 'sh' in that cgroup:
 
-  mount -t cgroup cpuset -ocpuset /dev/cgroup
-  cd /dev/cgroup
+  mount -t tmpfs cgroup_root /sys/fs/cgroup
+  mkdir /sys/fs/cgroup/cpuset
+  mount -t cgroup cpuset -ocpuset /sys/fs/cgroup/cpuset
+  cd /sys/fs/cgroup/cpuset
   mkdir Charlie
   cd Charlie
   /bin/echo 2-3 > cpuset.cpus
@@ -345,7 +348,7 @@ Creating, modifying, using the cgroups can be done through the cgroup
 virtual filesystem.
 
 To mount a cgroup hierarchy with all available subsystems, type:
-# mount -t cgroup xxx /dev/cgroup
+# mount -t cgroup xxx /sys/fs/cgroup
 
 The "xxx" is not interpreted by the cgroup code, but will appear in
 /proc/mounts so may be any useful identifying string that you like.
@@ -354,23 +357,32 @@ Note: Some subsystems do not work without some user input first. For instance,
 if cpusets are enabled the user will have to populate the cpus and mems files
 for each new cgroup created before that group can be used.
 
+As explained in section `1.2 Why are cgroups needed?' you should create
+different hierarchies of cgroups for each single resource or group of
+resources you want to control. Therefore, you should mount a tmpfs on
+/sys/fs/cgroup and create directories for each cgroup resource or resource
+group.
+
+# mount -t tmpfs cgroup_root /sys/fs/cgroup
+# mkdir /sys/fs/cgroup/rg1
+
 To mount a cgroup hierarchy with just the cpuset and memory
 subsystems, type:
-# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
+# mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
 
 To change the set of subsystems bound to a mounted hierarchy, just
 remount with different options:
-# mount -o remount,cpuset,blkio hier1 /dev/cgroup
+# mount -o remount,cpuset,blkio hier1 /sys/fs/cgroup/rg1
 
 Now memory is removed from the hierarchy and blkio is added.
 
 Note this will add blkio to the hierarchy but won't remove memory or
 cpuset, because the new options are appended to the old ones:
-# mount -o remount,blkio /dev/cgroup
+# mount -o remount,blkio /sys/fs/cgroup/rg1
 
 To Specify a hierarchy's release_agent:
 # mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
-  xxx /dev/cgroup
+  xxx /sys/fs/cgroup/rg1
 
 Note that specifying 'release_agent' more than once will return failure.
 
@@ -379,17 +391,17 @@ when the hierarchy consists of a single (root) cgroup. Supporting
 the ability to arbitrarily bind/unbind subsystems from an existing
 cgroup hierarchy is intended to be implemented in the future.
 
-Then under /dev/cgroup you can find a tree that corresponds to the
-tree of the cgroups in the system. For instance, /dev/cgroup
+Then under /sys/fs/cgroup/rg1 you can find a tree that corresponds to the
+tree of the cgroups in the system. For instance, /sys/fs/cgroup/rg1
 is the cgroup that holds the whole system.
 
 If you want to change the value of release_agent:
-# echo "/sbin/new_release_agent" > /dev/cgroup/release_agent
+# echo "/sbin/new_release_agent" > /sys/fs/cgroup/rg1/release_agent
 
 It can also be changed via remount.
 
-If you want to create a new cgroup under /dev/cgroup:
-# cd /dev/cgroup
+If you want to create a new cgroup under /sys/fs/cgroup/rg1:
+# cd /sys/fs/cgroup/rg1
 # mkdir my_cgroup
 
 Now you want to do something with this cgroup.
diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt
index 8b930946c52a..9ad85df4b983 100644
--- a/Documentation/cgroups/cpuacct.txt
+++ b/Documentation/cgroups/cpuacct.txt
@@ -10,26 +10,25 @@ directly present in its group.
 
 Accounting groups can be created by first mounting the cgroup filesystem.
 
-# mkdir /cgroups
-# mount -t cgroup -ocpuacct none /cgroups
+# mount -t cgroup -ocpuacct none /sys/fs/cgroup
 
-With the above step, the initial or the parent accounting group
-becomes visible at /cgroups. At bootup, this group includes all the
-tasks in the system. /cgroups/tasks lists the tasks in this cgroup.
-/cgroups/cpuacct.usage gives the CPU time (in nanoseconds) obtained by
-this group which is essentially the CPU time obtained by all the tasks
+With the above step, the initial or the parent accounting group becomes
+visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in
+the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup.
+/sys/fs/cgroup/cpuacct.usage gives the CPU time (in nanoseconds) obtained
+by this group which is essentially the CPU time obtained by all the tasks
 in the system.
 
-New accounting groups can be created under the parent group /cgroups.
+New accounting groups can be created under the parent group /sys/fs/cgroup.
 
-# cd /cgroups
+# cd /sys/fs/cgroup
 # mkdir g1
 # echo $$ > g1
 
 The above steps create a new group g1 and move the current shell
 process (bash) into it. CPU time consumed by this bash and its children
 can be obtained from g1/cpuacct.usage and the same is accumulated in
-/cgroups/cpuacct.usage also.
+/sys/fs/cgroup/cpuacct.usage also.
 
 cpuacct.stat file lists a few statistics which further divide the
 CPU time obtained by the cgroup into user and system times. Currently
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 98a30829af7a..5b0d78e55ccc 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -661,21 +661,21 @@ than stress the kernel.
 
 To start a new job that is to be contained within a cpuset, the steps are:
 
- 1) mkdir /dev/cpuset
- 2) mount -t cgroup -ocpuset cpuset /dev/cpuset
+ 1) mkdir /sys/fs/cgroup/cpuset
+ 2) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
  3) Create the new cpuset by doing mkdir's and write's (or echo's) in
-    the /dev/cpuset virtual file system.
+    the /sys/fs/cgroup/cpuset virtual file system.
  4) Start a task that will be the "founding father" of the new job.
  5) Attach that task to the new cpuset by writing its pid to the
-    /dev/cpuset tasks file for that cpuset.
+    /sys/fs/cgroup/cpuset tasks file for that cpuset.
  6) fork, exec or clone the job tasks from this founding father task.
 
 For example, the following sequence of commands will setup a cpuset
 named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
 and then start a subshell 'sh' in that cpuset:
 
-  mount -t cgroup -ocpuset cpuset /dev/cpuset
-  cd /dev/cpuset
+  mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
+  cd /sys/fs/cgroup/cpuset
   mkdir Charlie
   cd Charlie
   /bin/echo 2-3 > cpuset.cpus
@@ -710,14 +710,14 @@ Creating, modifying, using the cpusets can be done through the cpuset
 virtual filesystem.
 
 To mount it, type:
-# mount -t cgroup -o cpuset cpuset /dev/cpuset
+# mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset
 
-Then under /dev/cpuset you can find a tree that corresponds to the
-tree of the cpusets in the system. For instance, /dev/cpuset
+Then under /sys/fs/cgroup/cpuset you can find a tree that corresponds to the
+tree of the cpusets in the system. For instance, /sys/fs/cgroup/cpuset
 is the cpuset that holds the whole system.
 
-If you want to create a new cpuset under /dev/cpuset:
-# cd /dev/cpuset
+If you want to create a new cpuset under /sys/fs/cgroup/cpuset:
+# cd /sys/fs/cgroup/cpuset
 # mkdir my_cpuset
 
 Now you want to do something with this cpuset.
@@ -765,12 +765,12 @@ wrapper around the cgroup filesystem.
 
 The command
 
-mount -t cpuset X /dev/cpuset
+mount -t cpuset X /sys/fs/cgroup/cpuset
 
 is equivalent to
 
-mount -t cgroup -ocpuset,noprefix X /dev/cpuset
-echo "/sbin/cpuset_release_agent" > /dev/cpuset/release_agent
+mount -t cgroup -ocpuset,noprefix X /sys/fs/cgroup/cpuset
+echo "/sbin/cpuset_release_agent" > /sys/fs/cgroup/cpuset/release_agent
 
 2.2 Adding/removing cpus
 ------------------------
diff --git a/Documentation/cgroups/devices.txt b/Documentation/cgroups/devices.txt
index 57ca4c89fe5c..16624a7f8222 100644
--- a/Documentation/cgroups/devices.txt
+++ b/Documentation/cgroups/devices.txt
@@ -22,16 +22,16 @@ removed from the child(ren).
 An entry is added using devices.allow, and removed using
 devices.deny. For instance
 
-  echo 'c 1:3 mr' > /cgroups/1/devices.allow
+  echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
 
 allows cgroup 1 to read and mknod the device usually known as
 /dev/null. Doing
 
-  echo a > /cgroups/1/devices.deny
+  echo a > /sys/fs/cgroup/1/devices.deny
 
 will remove the default 'a *:* rwm' entry. Doing
 
-  echo a > /cgroups/1/devices.allow
+  echo a > /sys/fs/cgroup/1/devices.allow
 
 will add the 'a *:* rwm' entry to the whitelist.
 
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index 41f37fea1276..c21d77742a07 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -59,28 +59,28 @@ is non-freezable.
 
 * Examples of usage :
 
-  # mkdir /containers
-  # mount -t cgroup -ofreezer freezer /containers
-  # mkdir /containers/0
-  # echo $some_pid > /containers/0/tasks
+  # mkdir /sys/fs/cgroup/freezer
+  # mount -t cgroup -ofreezer freezer /sys/fs/cgroup/freezer
+  # mkdir /sys/fs/cgroup/freezer/0
+  # echo $some_pid > /sys/fs/cgroup/freezer/0/tasks
 
 to get status of the freezer subsystem :
 
-  # cat /containers/0/freezer.state
+  # cat /sys/fs/cgroup/freezer/0/freezer.state
   THAWED
 
 to freeze all tasks in the container :
 
-  # echo FROZEN > /containers/0/freezer.state
-  # cat /containers/0/freezer.state
+  # echo FROZEN > /sys/fs/cgroup/freezer/0/freezer.state
+  # cat /sys/fs/cgroup/freezer/0/freezer.state
   FREEZING
-  # cat /containers/0/freezer.state
+  # cat /sys/fs/cgroup/freezer/0/freezer.state
   FROZEN
 
 to unfreeze all tasks in the container :
 
-  # echo THAWED > /containers/0/freezer.state
-  # cat /containers/0/freezer.state
+  # echo THAWED > /sys/fs/cgroup/freezer/0/freezer.state
+  # cat /sys/fs/cgroup/freezer/0/freezer.state
   THAWED
 
 This is the basic mechanism which should do the right thing for user space task
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 510d64570d60..ffec2416aa74 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -264,16 +264,17 @@ b. Enable CONFIG_RESOURCE_COUNTERS
 c. Enable CONFIG_CGROUP_MEM_RES_CTLR
 d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
 
-1. Prepare the cgroups
-# mkdir -p /cgroups
-# mount -t cgroup none /cgroups -o memory
+1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
+# mount -t tmpfs none /sys/fs/cgroup
+# mkdir /sys/fs/cgroup/memory
+# mount -t cgroup none /sys/fs/cgroup/memory -o memory
 
 2. Make the new group and move bash into it
-# mkdir /cgroups/0
-# echo $$ > /cgroups/0/tasks
+# mkdir /sys/fs/cgroup/memory/0
+# echo $$ > /sys/fs/cgroup/memory/0/tasks
 
 Since now we're in the 0 cgroup, we can alter the memory limit:
-# echo 4M > /cgroups/0/memory.limit_in_bytes
+# echo 4M > /sys/fs/cgroup/memory/0/memory.limit_in_bytes
 
 NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
 mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
@@ -281,11 +282,11 @@ mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
 NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
 NOTE: We cannot set limits on the root cgroup any more.
 
-# cat /cgroups/0/memory.limit_in_bytes
+# cat /sys/fs/cgroup/memory/0/memory.limit_in_bytes
 4194304
 
 We can check the usage:
-# cat /cgroups/0/memory.usage_in_bytes
+# cat /sys/fs/cgroup/memory/0/memory.usage_in_bytes
 1216512
 
 A successful write to this file does not guarantee a successful set of
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 99961993257a..91ecff07cede 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -223,9 +223,10 @@ When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
 group created using the pseudo filesystem. See example steps below to create
 task groups and modify their CPU share using the "cgroups" pseudo filesystem.
 
-  # mkdir /dev/cpuctl
-  # mount -t cgroup -ocpu none /dev/cpuctl
-  # cd /dev/cpuctl
+  # mount -t tmpfs cgroup_root /sys/fs/cgroup
+  # mkdir /sys/fs/cgroup/cpu
+  # mount -t cgroup -ocpu none /sys/fs/cgroup/cpu
+  # cd /sys/fs/cgroup/cpu
 
   # mkdir multimedia    # create "multimedia" group of tasks
   # mkdir browser       # create "browser" group of tasks
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 605b0d40329d..71b54d549987 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -129,9 +129,8 @@ priority!
 Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
 CPU bandwidth to task groups.
 
-This uses the /cgroup virtual file system and
-"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
-control group.
+This uses the cgroup virtual file system and "<cgroup>/cpu.rt_runtime_us"
+to control the CPU time reserved for each control group.
 
 For more information on working with control groups, you should read
 Documentation/cgroups/cgroups.txt as well.
@@ -150,7 +149,7 @@ For now, this can be simplified to just the following (but see Future plans):
 ===============
 
 There is work in progress to make the scheduling period for each group
-("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well.
+("<cgroup>/cpu.rt_period_us") configurable as well.
 
 The constraint on the period is that a subgroup must have a smaller or
 equal period to its parent. But realistically its not very useful _yet_
diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
index 12f9ba20ccb7..550068466605 100644
--- a/Documentation/vm/hwpoison.txt
+++ b/Documentation/vm/hwpoison.txt
@@ -129,12 +129,12 @@ Limit injection to pages owned by memgroup. Specified by inode number
 of the memcg.
 
 Example:
-  mkdir /cgroup/hwpoison
+  mkdir /sys/fs/cgroup/mem/hwpoison
 
   usemem -m 100 -s 1000 &
-  echo `jobs -p` > /cgroup/hwpoison/tasks
+  echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks
 
-  memcg_ino=$(ls -id /cgroup/hwpoison | cut -f1 -d' ')
+  memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ')
   echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
 
   page-types -p `pidof init` --hwpoison  # shall do nothing