Diffstat (limited to 'fs')
-rw-r--r--	fs/bio.c	124
-rw-r--r--	fs/compat.c	11
-rw-r--r--	fs/exec.c	25
-rw-r--r--	fs/gfs2/rgrp.c	13
-rw-r--r--	fs/proc/stat.c	5
5 files changed, 89 insertions(+), 89 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index cd42bb882f30..7bbc98f0eda1 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -175,14 +175,6 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
 	struct bio_vec *bvl;
 
 	/*
-	 * If 'bs' is given, lookup the pool and do the mempool alloc.
-	 * If not, this is a bio_kmalloc() allocation and just do a
-	 * kzalloc() for the exact number of vecs right away.
-	 */
-	if (!bs)
-		bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask);
-
-	/*
 	 * see comment near bvec_array define!
 	 */
 	switch (nr) {
@@ -260,21 +252,6 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 	mempool_free(p, bs->bio_pool);
 }
 
-/*
- * default destructor for a bio allocated with bio_alloc_bioset()
- */
-static void bio_fs_destructor(struct bio *bio)
-{
-	bio_free(bio, fs_bio_set);
-}
-
-static void bio_kmalloc_destructor(struct bio *bio)
-{
-	if (bio_has_allocated_vec(bio))
-		kfree(bio->bi_io_vec);
-	kfree(bio);
-}
-
 void bio_init(struct bio *bio)
 {
 	memset(bio, 0, sizeof(*bio));
@@ -301,21 +278,15 @@ void bio_init(struct bio *bio)
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	unsigned long idx = BIO_POOL_NONE;
 	struct bio_vec *bvl = NULL;
-	struct bio *bio = NULL;
-	unsigned long idx = 0;
-	void *p = NULL;
-
-	if (bs) {
-		p = mempool_alloc(bs->bio_pool, gfp_mask);
-		if (!p)
-			goto err;
-		bio = p + bs->front_pad;
-	} else {
-		bio = kmalloc(sizeof(*bio), gfp_mask);
-		if (!bio)
-			goto err;
-	}
+	struct bio *bio;
+	void *p;
+
+	p = mempool_alloc(bs->bio_pool, gfp_mask);
+	if (unlikely(!p))
+		return NULL;
+	bio = p + bs->front_pad;
 
 	bio_init(bio);
 
@@ -332,22 +303,50 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 
 		nr_iovecs = bvec_nr_vecs(idx);
 	}
+out_set:
 	bio->bi_flags |= idx << BIO_POOL_OFFSET;
 	bio->bi_max_vecs = nr_iovecs;
-out_set:
 	bio->bi_io_vec = bvl;
-
 	return bio;
 
 err_free:
-	if (bs)
-		mempool_free(p, bs->bio_pool);
-	else
-		kfree(bio);
-err:
+	mempool_free(p, bs->bio_pool);
 	return NULL;
 }
 
+static void bio_fs_destructor(struct bio *bio)
+{
+	bio_free(bio, fs_bio_set);
+}
+
+/**
+ * bio_alloc - allocate a new bio, memory pool backed
+ * @gfp_mask: allocation mask to use
+ * @nr_iovecs: number of iovecs
+ *
+ * Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask
+ * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ *
+ * RETURNS:
+ * Pointer to new bio on success, NULL on failure.
+ */
+struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
+{
+	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+
+	if (bio)
+		bio->bi_destructor = bio_fs_destructor;
+
+	return bio;
+}
+
+static void bio_kmalloc_destructor(struct bio *bio)
+{
+	if (bio_integrity(bio))
+		bio_integrity_free(bio);
+	kfree(bio);
+}
+
 /**
  * bio_alloc - allocate a bio for I/O
  * @gfp_mask: the GFP_ mask given to the slab allocator
@@ -366,29 +365,20 @@ err:
  * do so can cause livelocks under memory pressure.
  *
  **/
-struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
-{
-	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-
-	if (bio)
-		bio->bi_destructor = bio_fs_destructor;
-
-	return bio;
-}
-
-/*
- * Like bio_alloc(), but doesn't use a mempool backing. This means that
- * it CAN fail, but while bio_alloc() can only be used for allocations
- * that have a short (finite) life span, bio_kmalloc() should be used
- * for more permanent bio allocations (like allocating some bio's for
- * initalization or setup purposes).
- */
 struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 {
-	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+	struct bio *bio;
 
-	if (bio)
-		bio->bi_destructor = bio_kmalloc_destructor;
+	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
+		      gfp_mask);
+	if (unlikely(!bio))
+		return NULL;
+
+	bio_init(bio);
+	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
+	bio->bi_max_vecs = nr_iovecs;
+	bio->bi_io_vec = bio->bi_inline_vecs;
+	bio->bi_destructor = bio_kmalloc_destructor;
 
 	return bio;
 }
@@ -832,7 +822,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		return ERR_PTR(-ENOMEM);
 
 	ret = -ENOMEM;
-	bio = bio_alloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(gfp_mask, nr_pages);
 	if (!bio)
 		goto out_bmd;
 
@@ -956,7 +946,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 	if (!nr_pages)
 		return ERR_PTR(-EINVAL);
 
-	bio = bio_alloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(gfp_mask, nr_pages);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 
@@ -1140,7 +1130,7 @@ static struct bio *__bio_map_kern(struct request_queue *q, void *data,
 	int offset, i;
 	struct bio *bio;
 
-	bio = bio_alloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(gfp_mask, nr_pages);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 
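
With this change bio_alloc() is always backed by fs_bio_set and, per the new kernel-doc above, is guaranteed to succeed when @gfp_mask contains __GFP_WAIT, while bio_kmalloc() becomes a plain kmalloc() of the bio plus its inline bio_vecs and may return NULL; that is why the converted callers above (bio_copy_user_iov(), __bio_map_user_iov(), __bio_map_kern()) keep their NULL checks. A minimal caller sketch for the mempool-backed path, illustrative only and not part of the patch (bdev, page and my_end_io() are placeholders):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void my_end_io(struct bio *bio, int error)	/* hypothetical completion */
{
	bio_put(bio);
}

static void write_one_page_sketch(struct block_device *bdev, struct page *page)
{
	/* short-lived I/O bio: mempool backed, no NULL check needed with __GFP_WAIT */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_sector = 0;			/* placeholder sector */
	bio->bi_end_io = my_end_io;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(WRITE, bio);
}
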
diff --git a/fs/compat.c b/fs/compat.c
index 379a399bf5c3..681ed81e6be0 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1476,6 +1476,7 @@ int compat_do_execve(char * filename,
 	struct linux_binprm *bprm;
 	struct file *file;
 	struct files_struct *displaced;
+	bool clear_in_exec;
 	int retval;
 
 	retval = unshare_files(&displaced);
@@ -1498,8 +1499,9 @@ int compat_do_execve(char * filename,
 		goto out_unlock;
 
 	retval = check_unsafe_exec(bprm);
-	if (retval)
+	if (retval < 0)
 		goto out_unlock;
+	clear_in_exec = retval;
 
 	file = open_exec(filename);
 	retval = PTR_ERR(file);
@@ -1546,9 +1548,7 @@ int compat_do_execve(char * filename,
 		goto out;
 
 	/* execve succeeded */
-	write_lock(&current->fs->lock);
 	current->fs->in_exec = 0;
-	write_unlock(&current->fs->lock);
 	current->in_execve = 0;
 	mutex_unlock(&current->cred_exec_mutex);
 	acct_update_integrals(current);
@@ -1568,9 +1568,8 @@ out_file:
 	}
 
 out_unmark:
-	write_lock(&current->fs->lock);
-	current->fs->in_exec = 0;
-	write_unlock(&current->fs->lock);
+	if (clear_in_exec)
+		current->fs->in_exec = 0;
 
 out_unlock:
 	current->in_execve = 0;
diff --git a/fs/exec.c b/fs/exec.c
index 052a961e41aa..a3a8ce83940f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1060,7 +1060,6 @@ EXPORT_SYMBOL(install_exec_creds);
 int check_unsafe_exec(struct linux_binprm *bprm)
 {
 	struct task_struct *p = current, *t;
-	unsigned long flags;
 	unsigned n_fs;
 	int res = 0;
 
@@ -1068,21 +1067,22 @@ int check_unsafe_exec(struct linux_binprm *bprm)
 
 	n_fs = 1;
 	write_lock(&p->fs->lock);
-	lock_task_sighand(p, &flags);
+	rcu_read_lock();
 	for (t = next_thread(p); t != p; t = next_thread(t)) {
 		if (t->fs == p->fs)
 			n_fs++;
 	}
+	rcu_read_unlock();
 
 	if (p->fs->users > n_fs) {
 		bprm->unsafe |= LSM_UNSAFE_SHARE;
 	} else {
-		if (p->fs->in_exec)
-			res = -EAGAIN;
-		p->fs->in_exec = 1;
+		res = -EAGAIN;
+		if (!p->fs->in_exec) {
+			p->fs->in_exec = 1;
+			res = 1;
+		}
 	}
-
-	unlock_task_sighand(p, &flags);
 	write_unlock(&p->fs->lock);
 
 	return res;
@@ -1284,6 +1284,7 @@ int do_execve(char * filename,
 	struct linux_binprm *bprm;
 	struct file *file;
 	struct files_struct *displaced;
+	bool clear_in_exec;
 	int retval;
 
 	retval = unshare_files(&displaced);
@@ -1306,8 +1307,9 @@ int do_execve(char * filename,
 		goto out_unlock;
 
 	retval = check_unsafe_exec(bprm);
-	if (retval)
+	if (retval < 0)
 		goto out_unlock;
+	clear_in_exec = retval;
 
 	file = open_exec(filename);
 	retval = PTR_ERR(file);
@@ -1355,9 +1357,7 @@ int do_execve(char * filename,
 		goto out;
 
 	/* execve succeeded */
-	write_lock(&current->fs->lock);
 	current->fs->in_exec = 0;
-	write_unlock(&current->fs->lock);
 	current->in_execve = 0;
 	mutex_unlock(&current->cred_exec_mutex);
 	acct_update_integrals(current);
@@ -1377,9 +1377,8 @@ out_file:
 	}
 
 out_unmark:
-	write_lock(&current->fs->lock);
-	current->fs->in_exec = 0;
-	write_unlock(&current->fs->lock);
+	if (clear_in_exec)
+		current->fs->in_exec = 0;
 
 out_unlock:
 	current->in_execve = 0;
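
For reference, the calling convention of check_unsafe_exec() after this patch, as exercised by do_execve() above and by compat_do_execve() in fs/compat.c (a summary of the hunks above, not text from the patch):

	retval = check_unsafe_exec(bprm);
	if (retval < 0)			/* -EAGAIN: another execve already owns fs->in_exec */
		goto out_unlock;
	clear_in_exec = retval;		/* 1: this call set fs->in_exec; the out_unmark
					 *    failure path must clear it again
					 * 0: fs_struct is shared, LSM_UNSAFE_SHARE was set
					 *    and there is nothing to clear */
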
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index f03d024038ea..565038243fa2 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -212,8 +212,7 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
 	if (tmp == 0)
 		return BFITNOENT;
 	ptr--;
-	bit = fls64(tmp);
-	bit--;		/* fls64 always adds one to the bit count */
+	bit = __ffs64(tmp);
 	bit /= 2;	/* two bits per entry in the bitmap */
 	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
 }
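
The gfs2_bitfit() hunk replaces the fls64()-based arithmetic (highest set bit, minus one) with __ffs64() (lowest set bit), so the scan now reports the lowest matching two-bit entry in the 64-bit word rather than the highest. A quick userspace check of the arithmetic, using __builtin_ctzll() as a stand-in for the kernel's __ffs64() (the bitmap value is made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t tmp = 0x90;		/* candidate entries at bits 4 and 7 */
	unsigned int bit;

	bit = __builtin_ctzll(tmp);	/* 4: lowest set bit, like __ffs64() */
	bit /= 2;			/* two bits per entry in the bitmap */
	assert(bit == 2);		/* third entry of the word, the lowest match */

	/* the old fls64(tmp) - 1 path would have picked bit 7, i.e. entry 3 */
	return 0;
}
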
@@ -1445,10 +1444,12 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
 u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	struct buffer_head *dibh;
 	struct gfs2_alloc *al = ip->i_alloc;
 	struct gfs2_rgrpd *rgd = al->al_rgd;
 	u32 goal, blk;
 	u64 block;
+	int error;
 
 	if (rgrp_contains_block(rgd, ip->i_goal))
 		goal = ip->i_goal - rgd->rd_data0;
@@ -1461,7 +1462,13 @@ u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
 	rgd->rd_last_alloc = blk;
 	block = rgd->rd_data0 + blk;
 	ip->i_goal = block;
-
+	error = gfs2_meta_inode_buffer(ip, &dibh);
+	if (error == 0) {
+		struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
+		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+		di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_goal);
+		brelse(dibh);
+	}
 	gfs2_assert_withdraw(sdp, rgd->rd_free >= *n);
 	rgd->rd_free -= *n;
 
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index f75efa22df5e..81e4eb60972e 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -18,6 +18,9 @@
 #ifndef arch_irq_stat
 #define arch_irq_stat() 0
 #endif
+#ifndef arch_idle_time
+#define arch_idle_time(cpu) 0
+#endif
 
 static int show_stat(struct seq_file *p, void *v)
 {
@@ -40,6 +43,7 @@ static int show_stat(struct seq_file *p, void *v)
 		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
 		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
 		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
+		idle = cputime64_add(idle, arch_idle_time(i));
 		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
 		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
 		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
@@ -69,6 +73,7 @@ static int show_stat(struct seq_file *p, void *v)
 		nice = kstat_cpu(i).cpustat.nice;
 		system = kstat_cpu(i).cpustat.system;
 		idle = kstat_cpu(i).cpustat.idle;
+		idle = cputime64_add(idle, arch_idle_time(i));
 		iowait = kstat_cpu(i).cpustat.iowait;
 		irq = kstat_cpu(i).cpustat.irq;
 		softirq = kstat_cpu(i).cpustat.softirq;
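
The new arch_idle_time() hook defaults to 0, so /proc/stat output is unchanged on architectures that do not provide it. An architecture that accounts idle time outside the regular cpustat counters could override the hook from its asm/cputime.h roughly like this (illustrative sketch; arch_cpu_idle_time() is a hypothetical helper):

/* arch/<arch>/include/asm/cputime.h */
extern cputime64_t arch_cpu_idle_time(int cpu);		/* hypothetical */
#define arch_idle_time(cpu)	arch_cpu_idle_time(cpu)
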