path: root/kernel/res_counter.c
author	Balbir Singh <balbir@linux.vnet.ibm.com>	2008-02-07 03:13:57 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 11:42:18 -0500
commit	0eea10301708c64a6b793894c156e21ddd15eb64 (patch)
tree	a0dcbe47d48d35ec0554faa5f86068cfab94ca6e /kernel/res_counter.c
parent	66e1707bc34609f626e2e7b4fe7e454c9748bad5 (diff)
Memory controller improve user interface
Change the interface to use bytes instead of pages.  Page sizes can vary
across platforms and configurations.  A new strategy routine has been added
to the resource counters infrastructure to format the data as desired.

Suggested by David Rientjes, Andrew Morton and Herbert Poetzl

Tested on a UML setup with the config for memory control enabled.

[kamezawa.hiroyu@jp.fujitsu.com: possible race fix in res_counter]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
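As an illustration of the strategy hook this patch introduces, a controller can supply a write routine that parses a human-readable byte value (e.g. "4M") and rounds it up to a whole number of pages before the limit is stored.  The sketch below is only illustrative: the name example_write_strategy and the page-rounding policy are assumptions, not necessarily what the memory controller itself implements; memparse(), PAGE_SIZE and PAGE_SHIFT are standard kernel facilities.

/*
 * Illustrative write strategy (hypothetical name): parse a byte value
 * with the usual K/M/G suffixes and round it up to a page multiple.
 */
static int example_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/* Round the value up to the closest page size. */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}

A controller would pass such a routine as the write_strategy argument to res_counter_write(); passing NULL keeps the plain "%llu"/simple_strtoull() behaviour, as the read path in the diff below shows.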
Diffstat (limited to 'kernel/res_counter.c')
-rw-r--r--	kernel/res_counter.c	36
1 file changed, 25 insertions, 11 deletions
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 722c484b068..16cbec2d5d6 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -16,7 +16,7 @@
 void res_counter_init(struct res_counter *counter)
 {
 	spin_lock_init(&counter->lock);
-	counter->limit = (unsigned long)LONG_MAX;
+	counter->limit = (unsigned long long)LLONG_MAX;
 }
 
 int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
@@ -59,8 +59,8 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
 }
 
 
-static inline unsigned long *res_counter_member(struct res_counter *counter,
-						int member)
+static inline unsigned long long *
+res_counter_member(struct res_counter *counter, int member)
 {
 	switch (member) {
 	case RES_USAGE:
@@ -76,24 +76,30 @@ static inline unsigned long *res_counter_member(struct res_counter *counter,
 }
 
 ssize_t res_counter_read(struct res_counter *counter, int member,
-		const char __user *userbuf, size_t nbytes, loff_t *pos)
+		const char __user *userbuf, size_t nbytes, loff_t *pos,
+		int (*read_strategy)(unsigned long long val, char *st_buf))
 {
-	unsigned long *val;
+	unsigned long long *val;
 	char buf[64], *s;
 
 	s = buf;
 	val = res_counter_member(counter, member);
-	s += sprintf(s, "%lu\n", *val);
+	if (read_strategy)
+		s += read_strategy(*val, s);
+	else
+		s += sprintf(s, "%llu\n", *val);
 	return simple_read_from_buffer((void __user *)userbuf, nbytes,
 			pos, buf, s - buf);
 }
 
 ssize_t res_counter_write(struct res_counter *counter, int member,
-		const char __user *userbuf, size_t nbytes, loff_t *pos)
+		const char __user *userbuf, size_t nbytes, loff_t *pos,
+		int (*write_strategy)(char *st_buf, unsigned long long *val))
 {
 	int ret;
 	char *buf, *end;
-	unsigned long tmp, *val;
+	unsigned long flags;
+	unsigned long long tmp, *val;
 
 	buf = kmalloc(nbytes + 1, GFP_KERNEL);
 	ret = -ENOMEM;
@@ -106,12 +112,20 @@ ssize_t res_counter_write(struct res_counter *counter, int member,
 		goto out_free;
 
 	ret = -EINVAL;
-	tmp = simple_strtoul(buf, &end, 10);
-	if (*end != '\0')
-		goto out_free;
 
+	if (write_strategy) {
+		if (write_strategy(buf, &tmp)) {
+			goto out_free;
+		}
+	} else {
+		tmp = simple_strtoull(buf, &end, 10);
+		if (*end != '\0')
+			goto out_free;
+	}
+	spin_lock_irqsave(&counter->lock, flags);
 	val = res_counter_member(counter, member);
 	*val = tmp;
+	spin_unlock_irqrestore(&counter->lock, flags);
 	ret = nbytes;
 out_free:
 	kfree(buf);