Diffstat (limited to 'fs/file.c')
-rw-r--r--  fs/file.c  60
1 file changed, 23 insertions(+), 37 deletions(-)
diff --git a/fs/file.c b/fs/file.c
index 34bb7f71d994..0be344755c02 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -39,28 +39,27 @@ int sysctl_nr_open_max = 1024 * 1024; /* raised later */
  */
 static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
 
-static inline void * alloc_fdmem(unsigned int size)
+static inline void *alloc_fdmem(unsigned int size)
 {
-        if (size <= PAGE_SIZE)
-                return kmalloc(size, GFP_KERNEL);
-        else
-                return vmalloc(size);
+        void *data;
+
+        data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
+        if (data != NULL)
+                return data;
+
+        return vmalloc(size);
 }
 
-static inline void free_fdarr(struct fdtable *fdt)
+static void free_fdmem(void *ptr)
 {
-        if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
-                kfree(fdt->fd);
-        else
-                vfree(fdt->fd);
+        is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
 }
 
-static inline void free_fdset(struct fdtable *fdt)
+static void __free_fdtable(struct fdtable *fdt)
 {
-        if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
-                kfree(fdt->open_fds);
-        else
-                vfree(fdt->open_fds);
+        free_fdmem(fdt->fd);
+        free_fdmem(fdt->open_fds);
+        kfree(fdt);
 }
 
 static void free_fdtable_work(struct work_struct *work)
@@ -75,9 +74,8 @@ static void free_fdtable_work(struct work_struct *work)
         spin_unlock_bh(&f->lock);
         while(fdt) {
                 struct fdtable *next = fdt->next;
-                vfree(fdt->fd);
-                free_fdset(fdt);
-                kfree(fdt);
+
+                __free_fdtable(fdt);
                 fdt = next;
         }
 }
@@ -98,7 +96,7 @@ void free_fdtable_rcu(struct rcu_head *rcu)
                                 container_of(fdt, struct files_struct, fdtab));
                 return;
         }
-        if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
+        if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
                 kfree(fdt->fd);
                 kfree(fdt->open_fds);
                 kfree(fdt);
@@ -178,13 +176,12 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
         fdt->open_fds = (fd_set *)data;
         data += nr / BITS_PER_BYTE;
         fdt->close_on_exec = (fd_set *)data;
-        INIT_RCU_HEAD(&fdt->rcu);
         fdt->next = NULL;
 
         return fdt;
 
 out_arr:
-        free_fdarr(fdt);
+        free_fdmem(fdt->fd);
 out_fdt:
         kfree(fdt);
 out:
@@ -214,9 +211,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
          * caller and alloc_fdtable(). Cheaper to catch it here...
          */
         if (unlikely(new_fdt->max_fds <= nr)) {
-                free_fdarr(new_fdt);
-                free_fdset(new_fdt);
-                kfree(new_fdt);
+                __free_fdtable(new_fdt);
                 return -EMFILE;
         }
         /*
@@ -232,9 +227,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
                 free_fdtable(cur_fdt);
         } else {
                 /* Somebody else expanded, so undo our attempt */
-                free_fdarr(new_fdt);
-                free_fdset(new_fdt);
-                kfree(new_fdt);
+                __free_fdtable(new_fdt);
         }
         return 1;
 }
@@ -312,7 +305,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
         new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
         new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
         new_fdt->fd = &newf->fd_array[0];
-        INIT_RCU_HEAD(&new_fdt->rcu);
         new_fdt->next = NULL;
 
         spin_lock(&oldf->file_lock);
@@ -325,11 +317,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
         while (unlikely(open_files > new_fdt->max_fds)) {
                 spin_unlock(&oldf->file_lock);
 
-                if (new_fdt != &newf->fdtab) {
-                        free_fdarr(new_fdt);
-                        free_fdset(new_fdt);
-                        kfree(new_fdt);
-                }
+                if (new_fdt != &newf->fdtab)
+                        __free_fdtable(new_fdt);
 
                 new_fdt = alloc_fdtable(open_files - 1);
                 if (!new_fdt) {
@@ -339,9 +328,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 
         /* beyond sysctl_nr_open; nothing to do */
         if (unlikely(new_fdt->max_fds < open_files)) {
-                free_fdarr(new_fdt);
-                free_fdset(new_fdt);
-                kfree(new_fdt);
+                __free_fdtable(new_fdt);
                 *errorp = -EMFILE;
                 goto out_release;
         }
@@ -430,7 +417,6 @@ struct files_struct init_files = {
                 .fd = &init_files.fd_array[0],
                 .close_on_exec = (fd_set *)&init_files.close_on_exec_init,
                 .open_fds = (fd_set *)&init_files.open_fds_init,
-                .rcu = RCU_HEAD_INIT,
         },
         .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
 };
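
For reference, the three helpers read as follows once this patch is applied. This is a sketch assembled from the hunks above, with explanatory comments added here; it is not copied verbatim from the tree.

static inline void *alloc_fdmem(unsigned int size)
{
        void *data;

        /*
         * Try the slab allocator first; __GFP_NOWARN suppresses the
         * allocation-failure warning so the vmalloc() fallback stays quiet.
         */
        data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
        if (data != NULL)
                return data;

        /* Large or fragmented requests fall back to vmalloc(). */
        return vmalloc(size);
}

static void free_fdmem(void *ptr)
{
        /* Pick the free routine that matches how the memory was obtained. */
        is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
        /* Free the fd array, the fd-set bitmaps and the fdtable itself. */
        free_fdmem(fdt->fd);
        free_fdmem(fdt->open_fds);
        kfree(fdt);
}

With these in place, the error paths in alloc_fdtable(), expand_fdtable() and dup_fd() collapse from the old free_fdarr()/free_fdset()/kfree() sequence into a single __free_fdtable() call, as the later hunks show.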