author     Jan Kara <jack@suse.cz>   2015-01-30 04:16:33 -0500
committer  Jan Kara <jack@suse.cz>   2015-01-30 04:16:33 -0500
commit     1cd6b7be92016538ea1f2a8e1f955e9b974d93ea (patch)
tree       77ca10be1da7aaf19cde95de0cb9c4bfcde636a0 /fs
parent     a39427007e7ccd83dbb7cd81b18156cebeab4d1e (diff)
parent     14bf61ffe6ac54afcd1e888a4407fe16054483db (diff)
Merge branch 'for_linus' into for_next
Diffstat (limited to 'fs')
-rw-r--r--   fs/btrfs/backref.c                    13
-rw-r--r--   fs/btrfs/delayed-inode.c               8
-rw-r--r--   fs/btrfs/extent-tree.c                12
-rw-r--r--   fs/btrfs/inode.c                       4
-rw-r--r--   fs/btrfs/scrub.c                       2
-rw-r--r--   fs/ceph/addr.c                         2
-rw-r--r--   fs/ext4/extents.c                      4
-rw-r--r--   fs/ext4/file.c                       220
-rw-r--r--   fs/ext4/resize.c                      24
-rw-r--r--   fs/ext4/super.c                        2
-rw-r--r--   fs/fcntl.c                             5
-rw-r--r--   fs/gfs2/quota.c                       49
-rw-r--r--   fs/nfsd/nfs4state.c                    2
-rw-r--r--   fs/notify/fanotify/fanotify_user.c    10
-rw-r--r--   fs/ocfs2/dlm/dlmrecovery.c             5
-rw-r--r--   fs/ocfs2/namei.c                      43
-rw-r--r--   fs/quota/dquot.c                      83
-rw-r--r--   fs/quota/quota.c                     162
-rw-r--r--   fs/udf/file.c                          2
-rw-r--r--   fs/xfs/xfs_qm.h                        4
-rw-r--r--   fs/xfs/xfs_qm_syscalls.c             156
-rw-r--r--   fs/xfs/xfs_quotaops.c                  8
22 files changed, 475 insertions, 345 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 2d3e32ebfd15..8729cf68d2fe 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
 {
 	int ret;
 	int type;
-	struct btrfs_tree_block_info *info;
 	struct btrfs_extent_inline_ref *eiref;
 
 	if (*ptr == (unsigned long)-1)
@@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
 	}
 
 	/* we can treat both ref types equally here */
-	info = (struct btrfs_tree_block_info *)(ei + 1);
 	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
-	*out_level = btrfs_tree_block_level(eb, info);
+
+	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
+		struct btrfs_tree_block_info *info;
+
+		info = (struct btrfs_tree_block_info *)(ei + 1);
+		*out_level = btrfs_tree_block_level(eb, info);
+	} else {
+		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
+		*out_level = (u8)key->offset;
+	}
 
 	if (ret == 1)
 		*ptr = (unsigned long)-1;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 054577bddaf2..de4e70fb3cbb 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
 {
 	struct btrfs_delayed_node *delayed_node;
 
+	/*
+	 * we don't do delayed inode updates during log recovery because it
+	 * leads to enospc problems. This means we also can't do
+	 * delayed inode refs
+	 */
+	if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+		return -EAGAIN;
+
 	delayed_node = btrfs_get_or_create_delayed_node(inode);
 	if (IS_ERR(delayed_node))
 		return PTR_ERR(delayed_node);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a80b97100d90..15116585e714 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
 	struct extent_buffer *leaf;
 
 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
-	if (ret < 0)
+	if (ret) {
+		if (ret > 0)
+			ret = -ENOENT;
 		goto fail;
-	BUG_ON(ret); /* Corruption */
+	}
 
 	leaf = path->nodes[0];
 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
 fail:
-	if (ret) {
+	if (ret)
 		btrfs_abort_transaction(trans, root, ret);
-		return ret;
-	}
-	return 0;
+	return ret;
 
 }
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e687bb0dc73a..8bf326affb94 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
 out_fail:
 	btrfs_end_transaction(trans, root);
-	if (drop_on_err)
+	if (drop_on_err) {
+		inode_dec_link_count(inode);
 		iput(inode);
+	}
 	btrfs_balance_delayed_items(root);
 	btrfs_btree_balance_dirty(root);
 	return err;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f2bb13a23f86..9e1569ffbf6e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
 		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
 					     flags, gen, mirror_num,
 					     have_csum ? csum : NULL);
-skip:
 		if (ret)
 			return ret;
+skip:
 		len -= l;
 		logical += l;
 		physical += l;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index f5013d92a7e6..c81c0e004588 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 		}
 	}
 
-	dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
 	     inode, ceph_vinop(inode), len, locked_page);
 
 	if (len > 0) {
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e5d3eadf47b1..bed43081720f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
 	/* fallback to generic here if not in extents fmt */
 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-		return __generic_block_fiemap(inode, fieinfo, start, len,
+		return generic_block_fiemap(inode, fieinfo, start, len,
 			ext4_get_block);
 
 	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
 		return -EBADR;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 513c12cf444c..8131be8c0af3 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
  * we determine this extent as a data or a hole according to whether the
  * page cache has data or not.
  */
-static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
-				     loff_t endoff, loff_t *offset)
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+				     int whence,
+				     struct ext4_map_blocks *map,
+				     loff_t *offset)
 {
 	struct pagevec pvec;
+	unsigned int blkbits;
 	pgoff_t index;
 	pgoff_t end;
+	loff_t endoff;
 	loff_t startoff;
 	loff_t lastoff;
 	int found = 0;
 
+	blkbits = inode->i_sb->s_blocksize_bits;
 	startoff = *offset;
 	lastoff = startoff;
-
+	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
 
 	index = startoff >> PAGE_CACHE_SHIFT;
 	end = endoff >> PAGE_CACHE_SHIFT;
@@ -403,144 +408,147 @@ out:
 static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
 {
 	struct inode *inode = file->f_mapping->host;
-	struct fiemap_extent_info fie;
-	struct fiemap_extent ext[2];
-	loff_t next;
-	int i, ret = 0;
+	struct ext4_map_blocks map;
+	struct extent_status es;
+	ext4_lblk_t start, last, end;
+	loff_t dataoff, isize;
+	int blkbits;
+	int ret = 0;
 
 	mutex_lock(&inode->i_mutex);
-	if (offset >= inode->i_size) {
+
+	isize = i_size_read(inode);
+	if (offset >= isize) {
 		mutex_unlock(&inode->i_mutex);
 		return -ENXIO;
 	}
-	fie.fi_flags = 0;
-	fie.fi_extents_max = 2;
-	fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
-	while (1) {
-		mm_segment_t old_fs = get_fs();
-
-		fie.fi_extents_mapped = 0;
-		memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
-
-		set_fs(get_ds());
-		ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
-		set_fs(old_fs);
-		if (ret)
+
+	blkbits = inode->i_sb->s_blocksize_bits;
+	start = offset >> blkbits;
+	last = start;
+	end = isize >> blkbits;
+	dataoff = offset;
+
+	do {
+		map.m_lblk = last;
+		map.m_len = end - last + 1;
+		ret = ext4_map_blocks(NULL, inode, &map, 0);
+		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+			if (last != start)
+				dataoff = (loff_t)last << blkbits;
 			break;
+		}
 
-		/* No extents found, EOF */
-		if (!fie.fi_extents_mapped) {
-			ret = -ENXIO;
+		/*
+		 * If there is a delay extent at this offset,
+		 * it will be as a data.
+		 */
+		ext4_es_find_delayed_extent_range(inode, last, last, &es);
+		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+			if (last != start)
+				dataoff = (loff_t)last << blkbits;
 			break;
 		}
-		for (i = 0; i < fie.fi_extents_mapped; i++) {
-			next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
 
-			if (offset < (loff_t)ext[i].fe_logical)
-				offset = (loff_t)ext[i].fe_logical;
-			/*
-			 * If extent is not unwritten, then it contains valid
-			 * data, mapped or delayed.
-			 */
-			if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
-				goto out;
+		/*
+		 * If there is a unwritten extent at this offset,
+		 * it will be as a data or a hole according to page
+		 * cache that has data or not.
+		 */
+		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+			int unwritten;
+			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+							      &map, &dataoff);
+			if (unwritten)
+				break;
+		}
 
-			/*
-			 * If there is a unwritten extent at this offset,
-			 * it will be as a data or a hole according to page
-			 * cache that has data or not.
-			 */
-			if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
-						      next, &offset))
-				goto out;
+		last++;
+		dataoff = (loff_t)last << blkbits;
+	} while (last <= end);
 
-			if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
-				ret = -ENXIO;
-				goto out;
-			}
-			offset = next;
-		}
-	}
-	if (offset > inode->i_size)
-		offset = inode->i_size;
-out:
 	mutex_unlock(&inode->i_mutex);
-	if (ret)
-		return ret;
 
-	return vfs_setpos(file, offset, maxsize);
+	if (dataoff > isize)
+		return -ENXIO;
+
+	return vfs_setpos(file, dataoff, maxsize);
 }
 
 /*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
  */
 static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
 {
 	struct inode *inode = file->f_mapping->host;
-	struct fiemap_extent_info fie;
-	struct fiemap_extent ext[2];
-	loff_t next;
-	int i, ret = 0;
+	struct ext4_map_blocks map;
+	struct extent_status es;
+	ext4_lblk_t start, last, end;
+	loff_t holeoff, isize;
+	int blkbits;
+	int ret = 0;
 
 	mutex_lock(&inode->i_mutex);
-	if (offset >= inode->i_size) {
+
+	isize = i_size_read(inode);
+	if (offset >= isize) {
 		mutex_unlock(&inode->i_mutex);
 		return -ENXIO;
 	}
 
-	fie.fi_flags = 0;
-	fie.fi_extents_max = 2;
-	fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
-	while (1) {
-		mm_segment_t old_fs = get_fs();
-
-		fie.fi_extents_mapped = 0;
-		memset(ext, 0, sizeof(*ext));
+	blkbits = inode->i_sb->s_blocksize_bits;
+	start = offset >> blkbits;
+	last = start;
+	end = isize >> blkbits;
+	holeoff = offset;
 
-		set_fs(get_ds());
-		ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
-		set_fs(old_fs);
-		if (ret)
-			break;
+	do {
+		map.m_lblk = last;
+		map.m_len = end - last + 1;
+		ret = ext4_map_blocks(NULL, inode, &map, 0);
+		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+			last += ret;
+			holeoff = (loff_t)last << blkbits;
+			continue;
+		}
 
-		/* No extents found */
-		if (!fie.fi_extents_mapped)
-			break;
+		/*
+		 * If there is a delay extent at this offset,
+		 * we will skip this extent.
+		 */
+		ext4_es_find_delayed_extent_range(inode, last, last, &es);
+		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+			last = es.es_lblk + es.es_len;
+			holeoff = (loff_t)last << blkbits;
+			continue;
+		}
 
-		for (i = 0; i < fie.fi_extents_mapped; i++) {
-			next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
-			/*
-			 * If extent is not unwritten, then it contains valid
-			 * data, mapped or delayed.
-			 */
-			if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
-				if (offset < (loff_t)ext[i].fe_logical)
-					goto out;
-				offset = next;
+		/*
+		 * If there is a unwritten extent at this offset,
+		 * it will be as a data or a hole according to page
+		 * cache that has data or not.
+		 */
+		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+			int unwritten;
+			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+							      &map, &holeoff);
+			if (!unwritten) {
+				last += ret;
+				holeoff = (loff_t)last << blkbits;
 				continue;
 			}
-			/*
-			 * If there is a unwritten extent at this offset,
-			 * it will be as a data or a hole according to page
-			 * cache that has data or not.
-			 */
-			if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
-						      next, &offset))
-				goto out;
-
-			offset = next;
-			if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
-				goto out;
 		}
-	}
-	if (offset > inode->i_size)
-		offset = inode->i_size;
-out:
+
+		/* find a hole */
+		break;
+	} while (last <= end);
+
 	mutex_unlock(&inode->i_mutex);
-	if (ret)
-		return ret;
 
-	return vfs_setpos(file, offset, maxsize);
+	if (holeoff > isize)
+		holeoff = isize;
+
+	return vfs_setpos(file, holeoff, maxsize);
 }
 
 /*
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index bf76f405a5f9..8a8ec6293b19 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb)
 		return -EPERM;
 
 	/*
+	 * If we are not using the primary superblock/GDT copy don't resize,
+	 * because the user tools have no way of handling this. Probably a
+	 * bad time to do it anyways.
+	 */
+	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+		ext4_warning(sb, "won't resize using backup superblock at %llu",
+			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+		return -EPERM;
+	}
+
+	/*
 	 * We are not allowed to do online-resizing on a filesystem mounted
 	 * with error, because it can destroy the filesystem easily.
 	 */
@@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
 		       gdb_num);
 
-	/*
-	 * If we are not using the primary superblock/GDT copy don't resize,
-	 * because the user tools have no way of handling this. Probably a
-	 * bad time to do it anyways.
-	 */
-	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
-	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
-		ext4_warning(sb, "won't resize using backup superblock at %llu",
-			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
-		return -EPERM;
-	}
-
 	gdb_bh = sb_bread(sb, gdblock);
 	if (!gdb_bh)
 		return -EIO;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 43c92b1685cb..74c5f53595fb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
 			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
 	    EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
-		ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+		ext4_warning(sb, "metadata_csum and uninit_bg are "
 			     "redundant flags; please run fsck.");
 
 	/* Check for a known checksum algorithm */
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 99d440a4a6ba..ee85cd4e136a 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -740,14 +740,15 @@ static int __init fcntl_init(void)
 	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
 	 * is defined as O_NONBLOCK on some platforms and not on others.
 	 */
-	BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
 		O_RDONLY | O_WRONLY | O_RDWR |
 		O_CREAT | O_EXCL | O_NOCTTY |
 		O_TRUNC | O_APPEND | /* O_NONBLOCK | */
 		__O_SYNC | O_DSYNC | FASYNC |
 		O_DIRECT | O_LARGEFILE | O_DIRECTORY |
 		O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
-		__FMODE_EXEC | O_PATH | __O_TMPFILE
+		__FMODE_EXEC | O_PATH | __O_TMPFILE |
+		__FMODE_NONOTIFY
 		));
 
 	fasync_cache = kmem_cache_create("fasync_cache",
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c8b148bbdc8b..3e193cb36996 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 
 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 			     s64 change, struct gfs2_quota_data *qd,
-			     struct fs_disk_quota *fdq)
+			     struct qc_dqblk *fdq)
 {
 	struct inode *inode = &ip->i_inode;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	be64_add_cpu(&q.qu_value, change);
 	qd->qd_qb.qb_value = q.qu_value;
 	if (fdq) {
-		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
-			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPC_SOFT) {
+			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_warn = q.qu_warn;
 		}
-		if (fdq->d_fieldmask & FS_DQ_BHARD) {
-			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPC_HARD) {
+			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_limit = q.qu_limit;
 		}
-		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
-			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+		if (fdq->d_fieldmask & QC_SPACE) {
+			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
 			qd->qd_qb.qb_value = q.qu_value;
 		}
 	}
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 }
 
 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
-			  struct fs_disk_quota *fdq)
+			  struct qc_dqblk *fdq)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
 	struct gfs2_holder q_gh;
 	int error;
 
-	memset(fdq, 0, sizeof(struct fs_disk_quota));
+	memset(fdq, 0, sizeof(*fdq));
 
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
 		goto out;
 
 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
-	fdq->d_version = FS_DQUOT_VERSION;
-	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
-	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
-	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
-	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
-	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
 
 	gfs2_glock_dq_uninit(&q_gh);
 out:
@@ -1536,10 +1533,10 @@ out:
 }
 
 /* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
 
 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
-			  struct fs_disk_quota *fdq)
+			  struct qc_dqblk *fdq)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
 		goto out_i;
 
 	/* If nothing has changed, this is a no-op */
-	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
-	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
-		fdq->d_fieldmask ^= FS_DQ_BSOFT;
+	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+		fdq->d_fieldmask ^= QC_SPC_SOFT;
 
-	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
-	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
-		fdq->d_fieldmask ^= FS_DQ_BHARD;
+	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+		fdq->d_fieldmask ^= QC_SPC_HARD;
 
-	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
-	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
-		fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+	if ((fdq->d_fieldmask & QC_SPACE) &&
+	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+		fdq->d_fieldmask ^= QC_SPACE;
 
 	if (fdq->d_fieldmask == 0)
 		goto out_i;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3550a9c87616..c06a1ba80d73 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
 		status = nfs4_setlease(dp);
 		goto out;
 	}
-	atomic_inc(&fp->fi_delegees);
 	if (fp->fi_had_conflict) {
 		status = -EAGAIN;
 		goto out_unlock;
 	}
+	atomic_inc(&fp->fi_delegees);
 	hash_delegation_locked(dp, fp);
 	status = 0;
 out_unlock:
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index c991616acca9..bff8567aa42d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 	struct fsnotify_event *kevent;
 	char __user *start;
 	int ret;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	start = buf;
 	group = file->private_data;
 
 	pr_debug("%s: group=%p\n", __func__, group);
 
+	add_wait_queue(&group->notification_waitq, &wait);
 	while (1) {
-		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
 		mutex_lock(&group->notification_mutex);
 		kevent = get_one_event(group, count);
 		mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 
 			if (start != buf)
 				break;
-			schedule();
+
+			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 			continue;
 		}
 
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 		buf += ret;
 		count -= ret;
 	}
+	remove_wait_queue(&group->notification_waitq, &wait);
 
-	finish_wait(&group->notification_waitq, &wait);
 	if (start != buf && ret != -EFAULT)
 		ret = buf - start;
 	return ret;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 79b5af5e6a7b..cecd875653e4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2023,11 +2023,8 @@ leave:
 	dlm_lockres_drop_inflight_ref(dlm, res);
 	spin_unlock(&res->spinlock);
 
-	if (ret < 0) {
+	if (ret < 0)
 		mlog_errno(ret);
-		if (newlock)
-			dlm_lock_put(newlock);
-	}
 
 	return ret;
 }
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index b931e04e3388..914c121ec890 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
 				     struct inode *inode,
 				     const char *symname);
 
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+			     struct buffer_head **bh1,
+			     struct inode *inode1,
+			     struct buffer_head **bh2,
+			     struct inode *inode2,
+			     int rename);
+
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
 /* An orphan dir name is an 8 byte value, printed as a hex string */
 #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
 
@@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry,
 {
 	handle_t *handle;
 	struct inode *inode = old_dentry->d_inode;
+	struct inode *old_dir = old_dentry->d_parent->d_inode;
 	int err;
 	struct buffer_head *fe_bh = NULL;
+	struct buffer_head *old_dir_bh = NULL;
 	struct buffer_head *parent_fe_bh = NULL;
 	struct ocfs2_dinode *fe = NULL;
 	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry,
 
 	dquot_initialize(dir);
 
-	err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
+	err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+				&parent_fe_bh, dir, 0);
 	if (err < 0) {
 		if (err != -ENOENT)
 			mlog_errno(err);
 		return err;
 	}
 
+	/* make sure both dirs have bhs
+	 * get an extra ref on old_dir_bh if old==new */
+	if (!parent_fe_bh) {
+		if (old_dir_bh) {
+			parent_fe_bh = old_dir_bh;
+			get_bh(parent_fe_bh);
+		} else {
+			mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
+			err = -EIO;
+			goto out;
+		}
+	}
+
 	if (!dir->i_nlink) {
 		err = -ENOENT;
 		goto out;
 	}
 
-	err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+	err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
 					 old_dentry->d_name.len, &old_de_ino);
 	if (err) {
 		err = -ENOENT;
@@ -801,10 +825,11 @@ out_unlock_inode:
 	ocfs2_inode_unlock(inode, 1);
 
 out:
-	ocfs2_inode_unlock(dir, 1);
+	ocfs2_double_unlock(old_dir, dir);
 
 	brelse(fe_bh);
 	brelse(parent_fe_bh);
+	brelse(old_dir_bh);
 
 	ocfs2_free_dir_lookup_result(&lookup);
 
@@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
 }
 
 /*
- * The only place this should be used is rename!
+ * The only place this should be used is rename and link!
  * if they have the same id, then the 1st one is the only one locked.
  */
 static int ocfs2_double_lock(struct ocfs2_super *osb,
 			     struct buffer_head **bh1,
 			     struct inode *inode1,
 			     struct buffer_head **bh2,
-			     struct inode *inode2)
+			     struct inode *inode2,
+			     int rename)
 {
 	int status;
 	int inode1_is_ancestor, inode2_is_ancestor;
@@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
 		}
 		/* lock id2 */
 		status = ocfs2_inode_lock_nested(inode2, bh2, 1,
-						 OI_LS_RENAME1);
+				rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
 		if (status < 0) {
 			if (status != -ENOENT)
 				mlog_errno(status);
@@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
 	}
 
 	/* lock id1 */
-	status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
+	status = ocfs2_inode_lock_nested(inode1, bh1, 1,
+			rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT);
 	if (status < 0) {
 		/*
 		 * An error return must mean that no cluster locks
@@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir,
 
 	/* if old and new are the same, this'll just do one lock. */
 	status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
-				   &new_dir_bh, new_dir);
+				   &new_dir_bh, new_dir, 1);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d25c3243c196..29eb9dc5728a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space)
 }
 
 /* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
 	struct mem_dqblk *dm = &dquot->dq_dqb;
 
 	memset(di, 0, sizeof(*di));
-	di->d_version = FS_DQUOT_VERSION;
-	di->d_flags = dquot->dq_id.type == USRQUOTA ?
-			FS_USER_QUOTA : FS_GROUP_QUOTA;
-	di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
 	spin_lock(&dq_data_lock);
-	di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
-	di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+	di->d_spc_hardlimit = dm->dqb_bhardlimit;
+	di->d_spc_softlimit = dm->dqb_bsoftlimit;
 	di->d_ino_hardlimit = dm->dqb_ihardlimit;
 	di->d_ino_softlimit = dm->dqb_isoftlimit;
-	di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
-	di->d_icount = dm->dqb_curinodes;
-	di->d_btimer = dm->dqb_btime;
-	di->d_itimer = dm->dqb_itime;
+	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+	di->d_ino_count = dm->dqb_curinodes;
+	di->d_spc_timer = dm->dqb_btime;
+	di->d_ino_timer = dm->dqb_itime;
 	spin_unlock(&dq_data_lock);
 }
 
 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
-		struct fs_disk_quota *di)
+		struct qc_dqblk *di)
 {
 	struct dquot *dquot;
 
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
 }
 EXPORT_SYMBOL(dquot_get_dqblk);
 
-#define VFS_FS_DQ_MASK \
-	(FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
-	 FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
-	 FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+	 QC_SPC_TIMER | QC_INO_TIMER)
 
 /* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
 	struct mem_dqblk *dm = &dquot->dq_dqb;
 	int check_blim = 0, check_ilim = 0;
 	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
 
-	if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+	if (di->d_fieldmask & ~VFS_QC_MASK)
 		return -EINVAL;
 
-	if (((di->d_fieldmask & FS_DQ_BSOFT) &&
-	    (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
-	    ((di->d_fieldmask & FS_DQ_BHARD) &&
-	    (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
-	    ((di->d_fieldmask & FS_DQ_ISOFT) &&
+	if (((di->d_fieldmask & QC_SPC_SOFT) &&
+	    stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+	    ((di->d_fieldmask & QC_SPC_HARD) &&
+	    stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+	    ((di->d_fieldmask & QC_INO_SOFT) &&
 	    (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
-	    ((di->d_fieldmask & FS_DQ_IHARD) &&
+	    ((di->d_fieldmask & QC_INO_HARD) &&
 	    (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
 		return -ERANGE;
 
 	spin_lock(&dq_data_lock);
-	if (di->d_fieldmask & FS_DQ_BCOUNT) {
-		dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+	if (di->d_fieldmask & QC_SPACE) {
+		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_BSOFT)
-		dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
-	if (di->d_fieldmask & FS_DQ_BHARD)
-		dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
-	if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+	if (di->d_fieldmask & QC_SPC_SOFT)
+		dm->dqb_bsoftlimit = di->d_spc_softlimit;
+	if (di->d_fieldmask & QC_SPC_HARD)
+		dm->dqb_bhardlimit = di->d_spc_hardlimit;
+	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ICOUNT) {
-		dm->dqb_curinodes = di->d_icount;
+	if (di->d_fieldmask & QC_INO_COUNT) {
+		dm->dqb_curinodes = di->d_ino_count;
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ISOFT)
+	if (di->d_fieldmask & QC_INO_SOFT)
 		dm->dqb_isoftlimit = di->d_ino_softlimit;
-	if (di->d_fieldmask & FS_DQ_IHARD)
+	if (di->d_fieldmask & QC_INO_HARD)
 		dm->dqb_ihardlimit = di->d_ino_hardlimit;
-	if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_BTIMER) {
-		dm->dqb_btime = di->d_btimer;
+	if (di->d_fieldmask & QC_SPC_TIMER) {
+		dm->dqb_btime = di->d_spc_timer;
 		check_blim = 1;
 		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
 	}
 
-	if (di->d_fieldmask & FS_DQ_ITIMER) {
-		dm->dqb_itime = di->d_itimer;
+	if (di->d_fieldmask & QC_INO_TIMER) {
+		dm->dqb_itime = di->d_ino_timer;
 		check_ilim = 1;
 		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
 	}
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
 			dm->dqb_btime = 0;
 			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-		} else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
 			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
 	}
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
 			dm->dqb_itime = 0;
 			clear_bit(DQ_INODES_B, &dquot->dq_flags);
-		} else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+		} else if (!(di->d_fieldmask & QC_INO_TIMER))
 			/* Set grace only if user hasn't provided his own... */
 			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
 	}
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 }
 
 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
-		struct fs_disk_quota *di)
+		struct qc_dqblk *di)
 {
 	struct dquot *dquot;
 	int rc;
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 2aa4151f99d2..6f3856328eea 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
 	return sb->s_qcop->set_info(sb, type, &info);
 }
 
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+	return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
 {
 	memset(dst, 0, sizeof(*dst));
-	dst->dqb_bhardlimit = src->d_blk_hardlimit;
-	dst->dqb_bsoftlimit = src->d_blk_softlimit;
-	dst->dqb_curspace = src->d_bcount;
+	dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+	dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+	dst->dqb_curspace = src->d_space;
 	dst->dqb_ihardlimit = src->d_ino_hardlimit;
 	dst->dqb_isoftlimit = src->d_ino_softlimit;
-	dst->dqb_curinodes = src->d_icount;
-	dst->dqb_btime = src->d_btimer;
-	dst->dqb_itime = src->d_itimer;
+	dst->dqb_curinodes = src->d_ino_count;
+	dst->dqb_btime = src->d_spc_timer;
+	dst->dqb_itime = src->d_ino_timer;
 	dst->dqb_valid = QIF_ALL;
 }
 
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
 			  void __user *addr)
 {
 	struct kqid qid;
-	struct fs_disk_quota fdq;
+	struct qc_dqblk fdq;
 	struct if_dqblk idq;
 	int ret;
 
@@ -154,36 +164,36 @@
 	return 0;
 }
 
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
 {
-	dst->d_blk_hardlimit = src->dqb_bhardlimit;
-	dst->d_blk_softlimit = src->dqb_bsoftlimit;
-	dst->d_bcount = src->dqb_curspace;
+	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+	dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+	dst->d_space = src->dqb_curspace;
 	dst->d_ino_hardlimit = src->dqb_ihardlimit;
 	dst->d_ino_softlimit = src->dqb_isoftlimit;
-	dst->d_icount = src->dqb_curinodes;
-	dst->d_btimer = src->dqb_btime;
-	dst->d_itimer = src->dqb_itime;
+	dst->d_ino_count = src->dqb_curinodes;
+	dst->d_spc_timer = src->dqb_btime;
+	dst->d_ino_timer = src->dqb_itime;
 
 	dst->d_fieldmask = 0;
 	if (src->dqb_valid & QIF_BLIMITS)
-		dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+		dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
 	if (src->dqb_valid & QIF_SPACE)
-		dst->d_fieldmask |= FS_DQ_BCOUNT;
+		dst->d_fieldmask |= QC_SPACE;
 	if (src->dqb_valid & QIF_ILIMITS)
-		dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+		dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
 	if (src->dqb_valid & QIF_INODES)
-		dst->d_fieldmask |= FS_DQ_ICOUNT;
+		dst->d_fieldmask |= QC_INO_COUNT;
 	if (src->dqb_valid & QIF_BTIME)
-		dst->d_fieldmask |= FS_DQ_BTIMER;
+		dst->d_fieldmask |= QC_SPC_TIMER;
 	if (src->dqb_valid & QIF_ITIME)
-		dst->d_fieldmask |= FS_DQ_ITIMER;
+		dst->d_fieldmask |= QC_INO_TIMER;
 }
 
 static int quota_setquota(struct super_block *sb, int type, qid_t id,
 			  void __user *addr)
 {
-	struct fs_disk_quota fdq;
+	struct qc_dqblk fdq;
 	struct if_dqblk idq;
 	struct kqid qid;
 
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
 	return ret;
 }
 
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs rely on definitions being in that header file. So
+ * just define same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+	return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+	dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+	dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+	dst->d_ino_hardlimit = src->d_ino_hardlimit;
+	dst->d_ino_softlimit = src->d_ino_softlimit;
+	dst->d_space = quota_bbtob(src->d_bcount);
+	dst->d_ino_count = src->d_icount;
+	dst->d_ino_timer = src->d_itimer;
+	dst->d_spc_timer = src->d_btimer;
+	dst->d_ino_warns = src->d_iwarns;
+	dst->d_spc_warns = src->d_bwarns;
+	dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+	dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+	dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+	dst->d_rt_spc_timer = src->d_rtbtimer;
+	dst->d_rt_spc_warns = src->d_rtbwarns;
+	dst->d_fieldmask = 0;
+	if (src->d_fieldmask & FS_DQ_ISOFT)
+		dst->d_fieldmask |= QC_INO_SOFT;
+	if (src->d_fieldmask & FS_DQ_IHARD)
+		dst->d_fieldmask |= QC_INO_HARD;
+	if (src->d_fieldmask & FS_DQ_BSOFT)
+		dst->d_fieldmask |= QC_SPC_SOFT;
+	if (src->d_fieldmask & FS_DQ_BHARD)
+		dst->d_fieldmask |= QC_SPC_HARD;
+	if (src->d_fieldmask & FS_DQ_RTBSOFT)
+		dst->d_fieldmask |= QC_RT_SPC_SOFT;
+	if (src->d_fieldmask & FS_DQ_RTBHARD)
+		dst->d_fieldmask |= QC_RT_SPC_HARD;
+	if (src->d_fieldmask & FS_DQ_BTIMER)
+		dst->d_fieldmask |= QC_SPC_TIMER;
+	if (src->d_fieldmask & FS_DQ_ITIMER)
+		dst->d_fieldmask |= QC_INO_TIMER;
+	if (src->d_fieldmask & FS_DQ_RTBTIMER)
+		dst->d_fieldmask |= QC_RT_SPC_TIMER;
+	if (src->d_fieldmask & FS_DQ_BWARNS)
+		dst->d_fieldmask |= QC_SPC_WARNS;
+	if (src->d_fieldmask & FS_DQ_IWARNS)
+		dst->d_fieldmask |= QC_INO_WARNS;
+	if (src->d_fieldmask & FS_DQ_RTBWARNS)
+		dst->d_fieldmask |= QC_RT_SPC_WARNS;
+	if (src->d_fieldmask & FS_DQ_BCOUNT)
+		dst->d_fieldmask |= QC_SPACE;
+	if (src->d_fieldmask & FS_DQ_ICOUNT)
+		dst->d_fieldmask |= QC_INO_COUNT;
+	if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+		dst->d_fieldmask |= QC_RT_SPACE;
+}
+
 static int quota_setxquota(struct super_block *sb, int type, qid_t id,
 			   void __user *addr)
 {
 	struct fs_disk_quota fdq;
+	struct qc_dqblk qdq;
 	struct kqid qid;
 
 	if (copy_from_user(&fdq, addr, sizeof(fdq)))
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
 	qid = make_kqid(current_user_ns(), type, id);
 	if (!qid_valid(qid))
 		return -EINVAL;
-	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+	copy_from_xfs_dqblk(&qdq, &fdq);
+	return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+			      int type, qid_t id)
+{
+	memset(dst, 0, sizeof(*dst));
+	dst->d_version = FS_DQUOT_VERSION;
+	dst->d_id = id;
+	if (type == USRQUOTA)
+		dst->d_flags = FS_USER_QUOTA;
+	else if (type == PRJQUOTA)
+		dst->d_flags = FS_PROJ_QUOTA;
+	else
+		dst->d_flags = FS_GROUP_QUOTA;
+	dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+	dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+	dst->d_ino_hardlimit = src->d_ino_hardlimit;
+	dst->d_ino_softlimit = src->d_ino_softlimit;
+	dst->d_bcount = quota_btobb(src->d_space);
+	dst->d_icount = src->d_ino_count;
+	dst->d_itimer = src->d_ino_timer;
+	dst->d_btimer = src->d_spc_timer;
+	dst->d_iwarns = src->d_ino_warns;
+	dst->d_bwarns = src->d_spc_warns;
+	dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+	dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+	dst->d_rtbcount = quota_btobb(src->d_rt_space);
+	dst->d_rtbtimer = src->d_rt_spc_timer;
+	dst->d_rtbwarns = src->d_rt_spc_warns;
 }
 
 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
 			   void __user *addr)
 {
 	struct fs_disk_quota fdq;
+	struct qc_dqblk qdq;
 	struct kqid qid;
 	int ret;
 
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
 	qid = make_kqid(current_user_ns(), type, id);
276 | if (!qid_valid(qid)) | 385 | if (!qid_valid(qid)) |
277 | return -EINVAL; | 386 | return -EINVAL; |
278 | ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); | 387 | ret = sb->s_qcop->get_dqblk(sb, qid, &qdq); |
279 | if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) | 388 | if (ret) |
389 | return ret; | ||
390 | copy_to_xfs_dqblk(&fdq, &qdq, type, id); | ||
391 | if (copy_to_user(addr, &fdq, sizeof(fdq))) | ||
280 | return -EFAULT; | 392 | return -EFAULT; |
281 | return ret; | 393 | return ret; |
282 | } | 394 | } |
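The quota_bbtob()/quota_btobb() helpers added above in fs/quota/quota.c do the 512-byte basic-block conversion that the XFS quotactl ABI expects: struct fs_disk_quota carries space in basic blocks, the new struct qc_dqblk carries plain bytes, and quota_btobb() rounds any partial block up. A minimal sketch of the same arithmetic, using local copies of the helpers purely for illustration:

#include <assert.h>
#include <stdint.h>

#define XFS_BB_SHIFT 9	/* one basic block = 512 bytes */

/* local copies of the helpers added in fs/quota/quota.c, for illustration */
static inline uint64_t quota_bbtob(uint64_t blocks)
{
	return blocks << XFS_BB_SHIFT;
}

static inline uint64_t quota_btobb(uint64_t bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

int main(void)
{
	assert(quota_bbtob(8) == 4096);		/* 8 basic blocks = 4 KiB   */
	assert(quota_btobb(4096) == 8);		/* exact multiple           */
	assert(quota_btobb(4097) == 9);		/* partial block rounds up  */
	assert(quota_btobb(0) == 0);
	return 0;
}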
diff --git a/fs/udf/file.c b/fs/udf/file.c index bb15771b92ae..08f3555fbeac 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
@@ -224,7 +224,7 @@ out: | |||
224 | static int udf_release_file(struct inode *inode, struct file *filp) | 224 | static int udf_release_file(struct inode *inode, struct file *filp) |
225 | { | 225 | { |
226 | if (filp->f_mode & FMODE_WRITE && | 226 | if (filp->f_mode & FMODE_WRITE && |
227 | atomic_read(&inode->i_writecount) > 1) { | 227 | atomic_read(&inode->i_writecount) == 1) { |
228 | /* | 228 | /* |
229 | * Grab i_mutex to avoid races with writes changing i_size | 229 | * Grab i_mutex to avoid races with writes changing i_size |
230 | * while we are running. | 230 | * while we are running. |
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 3a07a937e232..41f6c0b9d51c 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h | |||
@@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint); | |||
166 | /* quota ops */ | 166 | /* quota ops */ |
167 | extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); | 167 | extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); |
168 | extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, | 168 | extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, |
169 | uint, struct fs_disk_quota *); | 169 | uint, struct qc_dqblk *); |
170 | extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, | 170 | extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, |
171 | struct fs_disk_quota *); | 171 | struct qc_dqblk *); |
172 | extern int xfs_qm_scall_getqstat(struct xfs_mount *, | 172 | extern int xfs_qm_scall_getqstat(struct xfs_mount *, |
173 | struct fs_quota_stat *); | 173 | struct fs_quota_stat *); |
174 | extern int xfs_qm_scall_getqstatv(struct xfs_mount *, | 174 | extern int xfs_qm_scall_getqstatv(struct xfs_mount *, |
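The xfs_qm.h prototype changes above switch xfs_qm_scall_getquota() and xfs_qm_scall_setqlim() from the on-the-wire struct fs_disk_quota to the filesystem-neutral struct qc_dqblk. For reference, an abridged sketch of that structure, reconstructed from the fields this patch touches; the authoritative definition lives in include/linux/quota.h and exact types and ordering may differ slightly:

#include <linux/types.h>

/* All space values below are bytes, not 512-byte basic blocks. */
struct qc_dqblk {
	int d_fieldmask;		/* which fields ->set_dqblk() changes (QC_* flags) */
	u64 d_spc_hardlimit;		/* absolute limit on used space */
	u64 d_spc_softlimit;		/* preferred limit on used space */
	u64 d_ino_hardlimit;		/* maximum number of allocated inodes */
	u64 d_ino_softlimit;		/* preferred inode limit */
	u64 d_space;			/* space currently owned */
	u64 d_ino_count;		/* inodes currently owned */
	s64 d_ino_timer;		/* zero if within inode limits */
	s64 d_spc_timer;		/* zero if within space limits */
	int d_ino_warns;		/* warnings issued for inode usage */
	int d_spc_warns;		/* warnings issued for space usage */
	u64 d_rt_spc_hardlimit;		/* absolute limit on realtime space */
	u64 d_rt_spc_softlimit;		/* preferred limit on realtime space */
	u64 d_rt_space;			/* realtime space currently owned */
	s64 d_rt_spc_timer;		/* zero if within realtime space limits */
	int d_rt_spc_warns;		/* warnings issued for realtime space usage */
};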
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 42a15ddedb07..d56a4f5155d5 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c | |||
@@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); | |||
39 | STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, | 39 | STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, |
40 | uint); | 40 | uint); |
41 | STATIC uint xfs_qm_export_flags(uint); | 41 | STATIC uint xfs_qm_export_flags(uint); |
42 | STATIC uint xfs_qm_export_qtype_flags(uint); | ||
43 | 42 | ||
44 | /* | 43 | /* |
45 | * Turn off quota accounting and/or enforcement for all udquots and/or | 44 | * Turn off quota accounting and/or enforcement for all udquots and/or |
@@ -547,8 +546,8 @@ xfs_qm_scall_getqstatv( | |||
547 | return 0; | 546 | return 0; |
548 | } | 547 | } |
549 | 548 | ||
550 | #define XFS_DQ_MASK \ | 549 | #define XFS_QC_MASK \ |
551 | (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) | 550 | (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK) |
552 | 551 | ||
553 | /* | 552 | /* |
554 | * Adjust quota limits, and start/stop timers accordingly. | 553 | * Adjust quota limits, and start/stop timers accordingly. |
@@ -558,7 +557,7 @@ xfs_qm_scall_setqlim( | |||
558 | struct xfs_mount *mp, | 557 | struct xfs_mount *mp, |
559 | xfs_dqid_t id, | 558 | xfs_dqid_t id, |
560 | uint type, | 559 | uint type, |
561 | fs_disk_quota_t *newlim) | 560 | struct qc_dqblk *newlim) |
562 | { | 561 | { |
563 | struct xfs_quotainfo *q = mp->m_quotainfo; | 562 | struct xfs_quotainfo *q = mp->m_quotainfo; |
564 | struct xfs_disk_dquot *ddq; | 563 | struct xfs_disk_dquot *ddq; |
@@ -567,9 +566,9 @@ xfs_qm_scall_setqlim( | |||
567 | int error; | 566 | int error; |
568 | xfs_qcnt_t hard, soft; | 567 | xfs_qcnt_t hard, soft; |
569 | 568 | ||
570 | if (newlim->d_fieldmask & ~XFS_DQ_MASK) | 569 | if (newlim->d_fieldmask & ~XFS_QC_MASK) |
571 | return -EINVAL; | 570 | return -EINVAL; |
572 | if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) | 571 | if ((newlim->d_fieldmask & XFS_QC_MASK) == 0) |
573 | return 0; | 572 | return 0; |
574 | 573 | ||
575 | /* | 574 | /* |
@@ -607,11 +606,11 @@ xfs_qm_scall_setqlim( | |||
607 | /* | 606 | /* |
608 | * Make sure that hardlimits are >= soft limits before changing. | 607 | * Make sure that hardlimits are >= soft limits before changing. |
609 | */ | 608 | */ |
610 | hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? | 609 | hard = (newlim->d_fieldmask & QC_SPC_HARD) ? |
611 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : | 610 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) : |
612 | be64_to_cpu(ddq->d_blk_hardlimit); | 611 | be64_to_cpu(ddq->d_blk_hardlimit); |
613 | soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? | 612 | soft = (newlim->d_fieldmask & QC_SPC_SOFT) ? |
614 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : | 613 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) : |
615 | be64_to_cpu(ddq->d_blk_softlimit); | 614 | be64_to_cpu(ddq->d_blk_softlimit); |
616 | if (hard == 0 || hard >= soft) { | 615 | if (hard == 0 || hard >= soft) { |
617 | ddq->d_blk_hardlimit = cpu_to_be64(hard); | 616 | ddq->d_blk_hardlimit = cpu_to_be64(hard); |
@@ -624,11 +623,11 @@ xfs_qm_scall_setqlim( | |||
624 | } else { | 623 | } else { |
625 | xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); | 624 | xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); |
626 | } | 625 | } |
627 | hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? | 626 | hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ? |
628 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : | 627 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) : |
629 | be64_to_cpu(ddq->d_rtb_hardlimit); | 628 | be64_to_cpu(ddq->d_rtb_hardlimit); |
630 | soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? | 629 | soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ? |
631 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : | 630 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) : |
632 | be64_to_cpu(ddq->d_rtb_softlimit); | 631 | be64_to_cpu(ddq->d_rtb_softlimit); |
633 | if (hard == 0 || hard >= soft) { | 632 | if (hard == 0 || hard >= soft) { |
634 | ddq->d_rtb_hardlimit = cpu_to_be64(hard); | 633 | ddq->d_rtb_hardlimit = cpu_to_be64(hard); |
@@ -641,10 +640,10 @@ xfs_qm_scall_setqlim( | |||
641 | xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); | 640 | xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); |
642 | } | 641 | } |
643 | 642 | ||
644 | hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? | 643 | hard = (newlim->d_fieldmask & QC_INO_HARD) ? |
645 | (xfs_qcnt_t) newlim->d_ino_hardlimit : | 644 | (xfs_qcnt_t) newlim->d_ino_hardlimit : |
646 | be64_to_cpu(ddq->d_ino_hardlimit); | 645 | be64_to_cpu(ddq->d_ino_hardlimit); |
647 | soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? | 646 | soft = (newlim->d_fieldmask & QC_INO_SOFT) ? |
648 | (xfs_qcnt_t) newlim->d_ino_softlimit : | 647 | (xfs_qcnt_t) newlim->d_ino_softlimit : |
649 | be64_to_cpu(ddq->d_ino_softlimit); | 648 | be64_to_cpu(ddq->d_ino_softlimit); |
650 | if (hard == 0 || hard >= soft) { | 649 | if (hard == 0 || hard >= soft) { |
@@ -661,12 +660,12 @@ xfs_qm_scall_setqlim( | |||
661 | /* | 660 | /* |
662 | * Update warnings counter(s) if requested | 661 | * Update warnings counter(s) if requested |
663 | */ | 662 | */ |
664 | if (newlim->d_fieldmask & FS_DQ_BWARNS) | 663 | if (newlim->d_fieldmask & QC_SPC_WARNS) |
665 | ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); | 664 | ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns); |
666 | if (newlim->d_fieldmask & FS_DQ_IWARNS) | 665 | if (newlim->d_fieldmask & QC_INO_WARNS) |
667 | ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); | 666 | ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns); |
668 | if (newlim->d_fieldmask & FS_DQ_RTBWARNS) | 667 | if (newlim->d_fieldmask & QC_RT_SPC_WARNS) |
669 | ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); | 668 | ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns); |
670 | 669 | ||
671 | if (id == 0) { | 670 | if (id == 0) { |
672 | /* | 671 | /* |
@@ -676,24 +675,24 @@ xfs_qm_scall_setqlim( | |||
676 | * soft and hard limit values (already done, above), and | 675 | * soft and hard limit values (already done, above), and |
677 | * for warnings. | 676 | * for warnings. |
678 | */ | 677 | */ |
679 | if (newlim->d_fieldmask & FS_DQ_BTIMER) { | 678 | if (newlim->d_fieldmask & QC_SPC_TIMER) { |
680 | q->qi_btimelimit = newlim->d_btimer; | 679 | q->qi_btimelimit = newlim->d_spc_timer; |
681 | ddq->d_btimer = cpu_to_be32(newlim->d_btimer); | 680 | ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); |
682 | } | 681 | } |
683 | if (newlim->d_fieldmask & FS_DQ_ITIMER) { | 682 | if (newlim->d_fieldmask & QC_INO_TIMER) { |
684 | q->qi_itimelimit = newlim->d_itimer; | 683 | q->qi_itimelimit = newlim->d_ino_timer; |
685 | ddq->d_itimer = cpu_to_be32(newlim->d_itimer); | 684 | ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); |
686 | } | 685 | } |
687 | if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { | 686 | if (newlim->d_fieldmask & QC_RT_SPC_TIMER) { |
688 | q->qi_rtbtimelimit = newlim->d_rtbtimer; | 687 | q->qi_rtbtimelimit = newlim->d_rt_spc_timer; |
689 | ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); | 688 | ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); |
690 | } | 689 | } |
691 | if (newlim->d_fieldmask & FS_DQ_BWARNS) | 690 | if (newlim->d_fieldmask & QC_SPC_WARNS) |
692 | q->qi_bwarnlimit = newlim->d_bwarns; | 691 | q->qi_bwarnlimit = newlim->d_spc_warns; |
693 | if (newlim->d_fieldmask & FS_DQ_IWARNS) | 692 | if (newlim->d_fieldmask & QC_INO_WARNS) |
694 | q->qi_iwarnlimit = newlim->d_iwarns; | 693 | q->qi_iwarnlimit = newlim->d_ino_warns; |
695 | if (newlim->d_fieldmask & FS_DQ_RTBWARNS) | 694 | if (newlim->d_fieldmask & QC_RT_SPC_WARNS) |
696 | q->qi_rtbwarnlimit = newlim->d_rtbwarns; | 695 | q->qi_rtbwarnlimit = newlim->d_rt_spc_warns; |
697 | } else { | 696 | } else { |
698 | /* | 697 | /* |
699 | * If the user is now over quota, start the timelimit. | 698 | * If the user is now over quota, start the timelimit. |
@@ -798,7 +797,7 @@ xfs_qm_scall_getquota( | |||
798 | struct xfs_mount *mp, | 797 | struct xfs_mount *mp, |
799 | xfs_dqid_t id, | 798 | xfs_dqid_t id, |
800 | uint type, | 799 | uint type, |
801 | struct fs_disk_quota *dst) | 800 | struct qc_dqblk *dst) |
802 | { | 801 | { |
803 | struct xfs_dquot *dqp; | 802 | struct xfs_dquot *dqp; |
804 | int error; | 803 | int error; |
@@ -822,28 +821,25 @@ xfs_qm_scall_getquota( | |||
822 | } | 821 | } |
823 | 822 | ||
824 | memset(dst, 0, sizeof(*dst)); | 823 | memset(dst, 0, sizeof(*dst)); |
825 | dst->d_version = FS_DQUOT_VERSION; | 824 | dst->d_spc_hardlimit = |
826 | dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); | 825 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); |
827 | dst->d_id = be32_to_cpu(dqp->q_core.d_id); | 826 | dst->d_spc_softlimit = |
828 | dst->d_blk_hardlimit = | 827 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); |
829 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); | ||
830 | dst->d_blk_softlimit = | ||
831 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); | ||
832 | dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); | 828 | dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); |
833 | dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); | 829 | dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); |
834 | dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); | 830 | dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount); |
835 | dst->d_icount = dqp->q_res_icount; | 831 | dst->d_ino_count = dqp->q_res_icount; |
836 | dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); | 832 | dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer); |
837 | dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); | 833 | dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer); |
838 | dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); | 834 | dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns); |
839 | dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); | 835 | dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns); |
840 | dst->d_rtb_hardlimit = | 836 | dst->d_rt_spc_hardlimit = |
841 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); | 837 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); |
842 | dst->d_rtb_softlimit = | 838 | dst->d_rt_spc_softlimit = |
843 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); | 839 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); |
844 | dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); | 840 | dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount); |
845 | dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); | 841 | dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer); |
846 | dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); | 842 | dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns); |
847 | 843 | ||
848 | /* | 844 | /* |
849 | * Internally, we don't reset all the timers when quota enforcement | 845 | * Internally, we don't reset all the timers when quota enforcement |
@@ -856,23 +852,23 @@ xfs_qm_scall_getquota( | |||
856 | dqp->q_core.d_flags == XFS_DQ_GROUP) || | 852 | dqp->q_core.d_flags == XFS_DQ_GROUP) || |
857 | (!XFS_IS_PQUOTA_ENFORCED(mp) && | 853 | (!XFS_IS_PQUOTA_ENFORCED(mp) && |
858 | dqp->q_core.d_flags == XFS_DQ_PROJ)) { | 854 | dqp->q_core.d_flags == XFS_DQ_PROJ)) { |
859 | dst->d_btimer = 0; | 855 | dst->d_spc_timer = 0; |
860 | dst->d_itimer = 0; | 856 | dst->d_ino_timer = 0; |
861 | dst->d_rtbtimer = 0; | 857 | dst->d_rt_spc_timer = 0; |
862 | } | 858 | } |
863 | 859 | ||
864 | #ifdef DEBUG | 860 | #ifdef DEBUG |
865 | if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || | 861 | if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) || |
866 | (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) || | 862 | (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) || |
867 | (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) && | 863 | (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) && |
868 | dst->d_id != 0) { | 864 | id != 0) { |
869 | if ((dst->d_bcount > dst->d_blk_softlimit) && | 865 | if ((dst->d_space > dst->d_spc_softlimit) && |
870 | (dst->d_blk_softlimit > 0)) { | 866 | (dst->d_spc_softlimit > 0)) { |
871 | ASSERT(dst->d_btimer != 0); | 867 | ASSERT(dst->d_spc_timer != 0); |
872 | } | 868 | } |
873 | if ((dst->d_icount > dst->d_ino_softlimit) && | 869 | if ((dst->d_ino_count > dst->d_ino_softlimit) && |
874 | (dst->d_ino_softlimit > 0)) { | 870 | (dst->d_ino_softlimit > 0)) { |
875 | ASSERT(dst->d_itimer != 0); | 871 | ASSERT(dst->d_ino_timer != 0); |
876 | } | 872 | } |
877 | } | 873 | } |
878 | #endif | 874 | #endif |
@@ -882,26 +878,6 @@ out_put: | |||
882 | } | 878 | } |
883 | 879 | ||
884 | STATIC uint | 880 | STATIC uint |
885 | xfs_qm_export_qtype_flags( | ||
886 | uint flags) | ||
887 | { | ||
888 | /* | ||
889 | * Can't be more than one, or none. | ||
890 | */ | ||
891 | ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != | ||
892 | (FS_PROJ_QUOTA | FS_USER_QUOTA)); | ||
893 | ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != | ||
894 | (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); | ||
895 | ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != | ||
896 | (FS_USER_QUOTA | FS_GROUP_QUOTA)); | ||
897 | ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); | ||
898 | |||
899 | return (flags & XFS_DQ_USER) ? | ||
900 | FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? | ||
901 | FS_PROJ_QUOTA : FS_GROUP_QUOTA; | ||
902 | } | ||
903 | |||
904 | STATIC uint | ||
905 | xfs_qm_export_flags( | 881 | xfs_qm_export_flags( |
906 | uint flags) | 882 | uint flags) |
907 | { | 883 | { |
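Most of the xfs_qm_syscalls.c changes above are unit changes: because qc_dqblk expresses space in bytes rather than 512-byte basic blocks, setqlim now converts incoming limits with XFS_B_TO_FSB() instead of XFS_BB_TO_FSB(), and getquota reports usage with XFS_FSB_TO_B() instead of XFS_FSB_TO_BB(). A small illustrative check that both unit systems land on the same number of filesystem blocks, using simplified stand-ins for the XFS macros and an assumed 4096-byte filesystem block (the real macros take the mount's actual geometry):

#include <assert.h>
#include <stdint.h>

#define BLOCKSIZE	4096u		/* assumed filesystem block size */
#define BBSIZE		512u		/* one basic block */

/* simplified stand-ins for XFS_B_TO_FSB() and XFS_BB_TO_FSB() */
static uint64_t b_to_fsb(uint64_t bytes)
{
	return (bytes + BLOCKSIZE - 1) / BLOCKSIZE;
}

static uint64_t bb_to_fsb(uint64_t bblocks)
{
	return (bblocks * BBSIZE + BLOCKSIZE - 1) / BLOCKSIZE;
}

int main(void)
{
	/* a 1 GiB soft limit, expressed in the new and the old interface units */
	uint64_t limit_bytes = 1024ull * 1024 * 1024;	/* qc_dqblk: bytes            */
	uint64_t limit_bblks = limit_bytes / BBSIZE;	/* fs_disk_quota: 512-byte BBs */

	assert(b_to_fsb(limit_bytes) == 262144);
	assert(bb_to_fsb(limit_bblks) == 262144);
	assert(b_to_fsb(limit_bytes) == bb_to_fsb(limit_bblks));
	return 0;
}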
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index 8fcd20dbf89a..a226203fa46a 100644 --- a/fs/xfs/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c | |||
@@ -131,7 +131,7 @@ STATIC int | |||
131 | xfs_fs_get_dqblk( | 131 | xfs_fs_get_dqblk( |
132 | struct super_block *sb, | 132 | struct super_block *sb, |
133 | struct kqid qid, | 133 | struct kqid qid, |
134 | struct fs_disk_quota *fdq) | 134 | struct qc_dqblk *qdq) |
135 | { | 135 | { |
136 | struct xfs_mount *mp = XFS_M(sb); | 136 | struct xfs_mount *mp = XFS_M(sb); |
137 | 137 | ||
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk( | |||
141 | return -ESRCH; | 141 | return -ESRCH; |
142 | 142 | ||
143 | return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), | 143 | return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), |
144 | xfs_quota_type(qid.type), fdq); | 144 | xfs_quota_type(qid.type), qdq); |
145 | } | 145 | } |
146 | 146 | ||
147 | STATIC int | 147 | STATIC int |
148 | xfs_fs_set_dqblk( | 148 | xfs_fs_set_dqblk( |
149 | struct super_block *sb, | 149 | struct super_block *sb, |
150 | struct kqid qid, | 150 | struct kqid qid, |
151 | struct fs_disk_quota *fdq) | 151 | struct qc_dqblk *qdq) |
152 | { | 152 | { |
153 | struct xfs_mount *mp = XFS_M(sb); | 153 | struct xfs_mount *mp = XFS_M(sb); |
154 | 154 | ||
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk( | |||
160 | return -ESRCH; | 160 | return -ESRCH; |
161 | 161 | ||
162 | return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), | 162 | return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), |
163 | xfs_quota_type(qid.type), fdq); | 163 | xfs_quota_type(qid.type), qdq); |
164 | } | 164 | } |
165 | 165 | ||
166 | const struct quotactl_ops xfs_quotactl_operations = { | 166 | const struct quotactl_ops xfs_quotactl_operations = { |
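None of this changes what userspace sees: Q_XGETQUOTA and Q_XSETQLIM still exchange struct fs_disk_quota in basic-block units, with quota_getxquota()/quota_setxquota() translating to and from qc_dqblk through the copy helpers added in fs/quota/quota.c, so only the in-kernel interface moves to bytes. A hedged userspace sketch of the unchanged ABI; the device path and uid below are illustrative only:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>

int main(void)
{
	struct fs_disk_quota fdq;
	const char *dev = "/dev/sdb1";	/* assumed XFS block device */
	int uid = 1000;			/* assumed user id */

	memset(&fdq, 0, sizeof(fdq));
	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), dev, uid, (caddr_t)&fdq)) {
		perror("quotactl");
		return 1;
	}
	/* d_bcount and the block limits are still reported in 512-byte units */
	printf("used: %llu bytes, soft limit: %llu bytes\n",
	       (unsigned long long)fdq.d_bcount << 9,
	       (unsigned long long)fdq.d_blk_softlimit << 9);
	return 0;
}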