about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorEric Dumazet <dada1@cosmosbay.com>2008-02-06 04:37:56 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-06 13:41:09 -0500
commit1bf47346d75790ebd2563d909d48046961c7ffd5 (patch)
tree0f478764beb8dc4e0c71c5f3d6a657535579fe3a
parent6b2fb3c65844452bb9e8b449d50863d1b36c5dc0 (diff)
kernel/sys.c: get rid of expensive divides in groups_sort()
groups_sort() can be quite long if user loads a large gid table. This is
because GROUP_AT(group_info, some_integer) uses an integer divide. So having
to do XXX thousand divides during one syscall can lead to very high latencies.
(NGROUPS_MAX=65536)

In the past (25 Mar 2006), an analogous problem was found in groups_search()
(commit d74beb9f33a5f16d2965f11b275e401f225c949d) and at that time I changed
some variables to unsigned int.

I believe that a more generic fix is to make sure NGROUPS_PER_BLOCK is
unsigned.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/sched.h2
-rw-r--r--kernel/sys.c20
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9c13be3a21e8..7c8ca05c3cae 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -810,7 +810,7 @@ static inline int above_background_load(void)
 
 struct io_context;	/* See blkdev.h */
 #define NGROUPS_SMALL		32
-#define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t)))
+#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
 struct group_info {
 	int ngroups;
 	atomic_t usage;
diff --git a/kernel/sys.c b/kernel/sys.c
index 2b8e2daa9d95..e3c08d4324de 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1145,16 +1145,16 @@ static int groups_to_user(gid_t __user *grouplist,
 		struct group_info *group_info)
 {
 	int i;
-	int count = group_info->ngroups;
+	unsigned int count = group_info->ngroups;
 
 	for (i = 0; i < group_info->nblocks; i++) {
-		int cp_count = min(NGROUPS_PER_BLOCK, count);
-		int off = i * NGROUPS_PER_BLOCK;
-		int len = cp_count * sizeof(*grouplist);
+		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+		unsigned int len = cp_count * sizeof(*grouplist);
 
-		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
+		if (copy_to_user(grouplist, group_info->blocks[i], len))
 			return -EFAULT;
 
+		grouplist += NGROUPS_PER_BLOCK;
 		count -= cp_count;
 	}
 	return 0;
@@ -1165,16 +1165,16 @@ static int groups_from_user(struct group_info *group_info,
 		gid_t __user *grouplist)
 {
 	int i;
-	int count = group_info->ngroups;
+	unsigned int count = group_info->ngroups;
 
 	for (i = 0; i < group_info->nblocks; i++) {
-		int cp_count = min(NGROUPS_PER_BLOCK, count);
-		int off = i * NGROUPS_PER_BLOCK;
-		int len = cp_count * sizeof(*grouplist);
+		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+		unsigned int len = cp_count * sizeof(*grouplist);
 
-		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
+		if (copy_from_user(group_info->blocks[i], grouplist, len))
 			return -EFAULT;
 
+		grouplist += NGROUPS_PER_BLOCK;
 		count -= cp_count;
 	}
 	return 0;