Diffstat (limited to 'fs')
-rw-r--r-- | fs/btrfs/backref.c | 94
-rw-r--r-- | fs/btrfs/ctree.h | 9
-rw-r--r-- | fs/btrfs/disk-io.c | 1
-rw-r--r-- | fs/btrfs/inode.c | 4
-rw-r--r-- | fs/ceph/addr.c | 21
-rw-r--r-- | fs/exec.c | 2
-rw-r--r-- | fs/hfsplus/ioctl.c | 9
-rw-r--r-- | fs/hfsplus/wrapper.c | 2
-rw-r--r-- | fs/nfs/client.c | 1
-rw-r--r-- | fs/nfs/direct.c | 1
-rw-r--r-- | fs/nfs/idmap.c | 13
-rw-r--r-- | fs/nfs/inode.c | 2
-rw-r--r-- | fs/nfs/pnfs.c | 13
-rw-r--r-- | fs/nilfs2/gcinode.c | 2
-rw-r--r-- | fs/nilfs2/segment.c | 2
-rw-r--r-- | fs/pstore/inode.c | 2
-rw-r--r-- | fs/pstore/platform.c | 34
-rw-r--r-- | fs/pstore/ram.c | 3
-rw-r--r-- | fs/pstore/ram_core.c | 27
-rw-r--r-- | fs/ubifs/debug.c | 8
-rw-r--r-- | fs/ubifs/find.c | 4
-rw-r--r-- | fs/udf/super.c | 102
-rw-r--r-- | fs/xfs/xfs_alloc.c | 3
-rw-r--r-- | fs/xfs/xfs_aops.c | 11
-rw-r--r-- | fs/xfs/xfs_buf.c | 16
-rw-r--r-- | fs/xfs/xfs_inode_item.c | 17
-rw-r--r-- | fs/xfs/xfs_log.c | 77
-rw-r--r-- | fs/xfs/xfs_log_cil.c | 22
-rw-r--r-- | fs/xfs/xfs_log_priv.h | 46
-rw-r--r-- | fs/xfs/xfs_log_recover.c | 38
-rw-r--r-- | fs/xfs/xfs_mount.h | 4
-rw-r--r-- | fs/xfs/xfs_sync.c | 32
-rw-r--r-- | fs/xfs/xfs_trace.h | 18
33 files changed, 362 insertions, 278 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8f7d1237b7a0..7301cdb4b2cb 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -179,61 +179,74 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id, | |||
179 | 179 | ||
180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | 180 | static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, |
181 | struct ulist *parents, int level, | 181 | struct ulist *parents, int level, |
182 | struct btrfs_key *key, u64 time_seq, | 182 | struct btrfs_key *key_for_search, u64 time_seq, |
183 | u64 wanted_disk_byte, | 183 | u64 wanted_disk_byte, |
184 | const u64 *extent_item_pos) | 184 | const u64 *extent_item_pos) |
185 | { | 185 | { |
186 | int ret; | 186 | int ret = 0; |
187 | int slot = path->slots[level]; | 187 | int slot; |
188 | struct extent_buffer *eb = path->nodes[level]; | 188 | struct extent_buffer *eb; |
189 | struct btrfs_key key; | ||
189 | struct btrfs_file_extent_item *fi; | 190 | struct btrfs_file_extent_item *fi; |
190 | struct extent_inode_elem *eie = NULL; | 191 | struct extent_inode_elem *eie = NULL; |
191 | u64 disk_byte; | 192 | u64 disk_byte; |
192 | u64 wanted_objectid = key->objectid; | ||
193 | 193 | ||
194 | add_parent: | 194 | if (level != 0) { |
195 | if (level == 0 && extent_item_pos) { | 195 | eb = path->nodes[level]; |
196 | fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | 196 | ret = ulist_add(parents, eb->start, 0, GFP_NOFS); |
197 | ret = check_extent_in_eb(key, eb, fi, *extent_item_pos, &eie); | ||
198 | if (ret < 0) | 197 | if (ret < 0) |
199 | return ret; | 198 | return ret; |
200 | } | ||
201 | ret = ulist_add(parents, eb->start, (unsigned long)eie, GFP_NOFS); | ||
202 | if (ret < 0) | ||
203 | return ret; | ||
204 | |||
205 | if (level != 0) | ||
206 | return 0; | 199 | return 0; |
200 | } | ||
207 | 201 | ||
208 | /* | 202 | /* |
209 | * if the current leaf is full with EXTENT_DATA items, we must | 203 | * We normally enter this function with the path already pointing to |
210 | * check the next one if that holds a reference as well. | 204 | * the first item to check. But sometimes, we may enter it with |
211 | * ref->count cannot be used to skip this check. | 205 | * slot==nritems. In that case, go to the next leaf before we continue. |
212 | * repeat this until we don't find any additional EXTENT_DATA items. | ||
213 | */ | 206 | */ |
214 | while (1) { | 207 | if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) |
215 | eie = NULL; | ||
216 | ret = btrfs_next_old_leaf(root, path, time_seq); | 208 | ret = btrfs_next_old_leaf(root, path, time_seq); |
217 | if (ret < 0) | ||
218 | return ret; | ||
219 | if (ret) | ||
220 | return 0; | ||
221 | 209 | ||
210 | while (!ret) { | ||
222 | eb = path->nodes[0]; | 211 | eb = path->nodes[0]; |
223 | for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) { | 212 | slot = path->slots[0]; |
224 | btrfs_item_key_to_cpu(eb, key, slot); | 213 | |
225 | if (key->objectid != wanted_objectid || | 214 | btrfs_item_key_to_cpu(eb, &key, slot); |
226 | key->type != BTRFS_EXTENT_DATA_KEY) | 215 | |
227 | return 0; | 216 | if (key.objectid != key_for_search->objectid || |
228 | fi = btrfs_item_ptr(eb, slot, | 217 | key.type != BTRFS_EXTENT_DATA_KEY) |
229 | struct btrfs_file_extent_item); | 218 | break; |
230 | disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); | 219 | |
231 | if (disk_byte == wanted_disk_byte) | 220 | fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); |
232 | goto add_parent; | 221 | disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); |
222 | |||
223 | if (disk_byte == wanted_disk_byte) { | ||
224 | eie = NULL; | ||
225 | if (extent_item_pos) { | ||
226 | ret = check_extent_in_eb(&key, eb, fi, | ||
227 | *extent_item_pos, | ||
228 | &eie); | ||
229 | if (ret < 0) | ||
230 | break; | ||
231 | } | ||
232 | if (!ret) { | ||
233 | ret = ulist_add(parents, eb->start, | ||
234 | (unsigned long)eie, GFP_NOFS); | ||
235 | if (ret < 0) | ||
236 | break; | ||
237 | if (!extent_item_pos) { | ||
238 | ret = btrfs_next_old_leaf(root, path, | ||
239 | time_seq); | ||
240 | continue; | ||
241 | } | ||
242 | } | ||
233 | } | 243 | } |
244 | ret = btrfs_next_old_item(root, path, time_seq); | ||
234 | } | 245 | } |
235 | 246 | ||
236 | return 0; | 247 | if (ret > 0) |
248 | ret = 0; | ||
249 | return ret; | ||
237 | } | 250 | } |
238 | 251 | ||
239 | /* | 252 | /* |
@@ -250,7 +263,6 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, | |||
250 | struct btrfs_path *path; | 263 | struct btrfs_path *path; |
251 | struct btrfs_root *root; | 264 | struct btrfs_root *root; |
252 | struct btrfs_key root_key; | 265 | struct btrfs_key root_key; |
253 | struct btrfs_key key = {0}; | ||
254 | struct extent_buffer *eb; | 266 | struct extent_buffer *eb; |
255 | int ret = 0; | 267 | int ret = 0; |
256 | int root_level; | 268 | int root_level; |
@@ -295,11 +307,9 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, | |||
295 | goto out; | 307 | goto out; |
296 | } | 308 | } |
297 | 309 | ||
298 | if (level == 0) | 310 | ret = add_all_parents(root, path, parents, level, &ref->key_for_search, |
299 | btrfs_item_key_to_cpu(eb, &key, path->slots[0]); | 311 | time_seq, ref->wanted_disk_byte, |
300 | 312 | extent_item_pos); | |
301 | ret = add_all_parents(root, path, parents, level, &key, time_seq, | ||
302 | ref->wanted_disk_byte, extent_item_pos); | ||
303 | out: | 313 | out: |
304 | btrfs_free_path(path); | 314 | btrfs_free_path(path); |
305 | return ret; | 315 | return ret; |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8b73b2d4deb7..fa5c45b39075 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2755,13 +2755,18 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, | |||
2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2755 | int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2756 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | 2756 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, |
2757 | u64 time_seq); | 2757 | u64 time_seq); |
2758 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) | 2758 | static inline int btrfs_next_old_item(struct btrfs_root *root, |
2759 | struct btrfs_path *p, u64 time_seq) | ||
2759 | { | 2760 | { |
2760 | ++p->slots[0]; | 2761 | ++p->slots[0]; |
2761 | if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) | 2762 | if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) |
2762 | return btrfs_next_leaf(root, p); | 2763 | return btrfs_next_old_leaf(root, p, time_seq); |
2763 | return 0; | 2764 | return 0; |
2764 | } | 2765 | } |
2766 | static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) | ||
2767 | { | ||
2768 | return btrfs_next_old_item(root, p, 0); | ||
2769 | } | ||
2765 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); | 2770 | int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); |
2766 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); | 2771 | int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); |
2767 | int __must_check btrfs_drop_snapshot(struct btrfs_root *root, | 2772 | int __must_check btrfs_drop_snapshot(struct btrfs_root *root, |
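The new btrfs_next_old_item() helper walks to the next item as the tree looked at a given time_seq, falling back to btrfs_next_old_leaf() at a leaf boundary. A minimal sketch of the resulting iteration pattern, assuming the caller has already positioned the path at the first candidate item (hypothetical function, not part of the patch):

/*
 * Hedged sketch: visit every EXTENT_DATA item of 'objectid' as the tree
 * looked at 'time_seq'. Mirrors the loop structure of add_all_parents()
 * above; the patch additionally handles entering with slot == nritems.
 */
static int walk_file_extents(struct btrfs_root *root, struct btrfs_path *path,
			     u64 objectid, u64 time_seq)
{
	struct btrfs_key key;
	int ret = 0;

	while (!ret) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;
		/* ... inspect the item at path->slots[0] here ... */
		ret = btrfs_next_old_item(root, path, time_seq);
	}
	return ret > 0 ? 0 : ret;	/* > 0 just means "no more items" */
}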
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e1890b1d3075..7b845ff4af99 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3426,6 +3426,7 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
3426 | mutex_unlock(&head->mutex); | 3426 | mutex_unlock(&head->mutex); |
3427 | btrfs_put_delayed_ref(ref); | 3427 | btrfs_put_delayed_ref(ref); |
3428 | 3428 | ||
3429 | spin_lock(&delayed_refs->lock); | ||
3429 | continue; | 3430 | continue; |
3430 | } | 3431 | } |
3431 | 3432 | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a4f02501da40..d8bb0dbc4941 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -987,7 +987,7 @@ static noinline void async_cow_start(struct btrfs_work *work) | |||
987 | async_cow->start, async_cow->end, async_cow, | 987 | async_cow->start, async_cow->end, async_cow, |
988 | &num_added); | 988 | &num_added); |
989 | if (num_added == 0) { | 989 | if (num_added == 0) { |
990 | iput(async_cow->inode); | 990 | btrfs_add_delayed_iput(async_cow->inode); |
991 | async_cow->inode = NULL; | 991 | async_cow->inode = NULL; |
992 | } | 992 | } |
993 | } | 993 | } |
@@ -1023,7 +1023,7 @@ static noinline void async_cow_free(struct btrfs_work *work) | |||
1023 | struct async_cow *async_cow; | 1023 | struct async_cow *async_cow; |
1024 | async_cow = container_of(work, struct async_cow, work); | 1024 | async_cow = container_of(work, struct async_cow, work); |
1025 | if (async_cow->inode) | 1025 | if (async_cow->inode) |
1026 | iput(async_cow->inode); | 1026 | btrfs_add_delayed_iput(async_cow->inode); |
1027 | kfree(async_cow); | 1027 | kfree(async_cow); |
1028 | } | 1028 | } |
1029 | 1029 | ||
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 173b1d22e59b..8b67304e4b80 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -54,7 +54,12 @@ | |||
54 | (CONGESTION_ON_THRESH(congestion_kb) - \ | 54 | (CONGESTION_ON_THRESH(congestion_kb) - \ |
55 | (CONGESTION_ON_THRESH(congestion_kb) >> 2)) | 55 | (CONGESTION_ON_THRESH(congestion_kb) >> 2)) |
56 | 56 | ||
57 | 57 | static inline struct ceph_snap_context *page_snap_context(struct page *page) | |
58 | { | ||
59 | if (PagePrivate(page)) | ||
60 | return (void *)page->private; | ||
61 | return NULL; | ||
62 | } | ||
58 | 63 | ||
59 | /* | 64 | /* |
60 | * Dirty a page. Optimistically adjust accounting, on the assumption | 65 | * Dirty a page. Optimistically adjust accounting, on the assumption |
@@ -142,10 +147,9 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset) | |||
142 | { | 147 | { |
143 | struct inode *inode; | 148 | struct inode *inode; |
144 | struct ceph_inode_info *ci; | 149 | struct ceph_inode_info *ci; |
145 | struct ceph_snap_context *snapc = (void *)page->private; | 150 | struct ceph_snap_context *snapc = page_snap_context(page); |
146 | 151 | ||
147 | BUG_ON(!PageLocked(page)); | 152 | BUG_ON(!PageLocked(page)); |
148 | BUG_ON(!page->private); | ||
149 | BUG_ON(!PagePrivate(page)); | 153 | BUG_ON(!PagePrivate(page)); |
150 | BUG_ON(!page->mapping); | 154 | BUG_ON(!page->mapping); |
151 | 155 | ||
@@ -182,7 +186,6 @@ static int ceph_releasepage(struct page *page, gfp_t g) | |||
182 | struct inode *inode = page->mapping ? page->mapping->host : NULL; | 186 | struct inode *inode = page->mapping ? page->mapping->host : NULL; |
183 | dout("%p releasepage %p idx %lu\n", inode, page, page->index); | 187 | dout("%p releasepage %p idx %lu\n", inode, page, page->index); |
184 | WARN_ON(PageDirty(page)); | 188 | WARN_ON(PageDirty(page)); |
185 | WARN_ON(page->private); | ||
186 | WARN_ON(PagePrivate(page)); | 189 | WARN_ON(PagePrivate(page)); |
187 | return 0; | 190 | return 0; |
188 | } | 191 | } |
@@ -443,7 +446,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
443 | osdc = &fsc->client->osdc; | 446 | osdc = &fsc->client->osdc; |
444 | 447 | ||
445 | /* verify this is a writeable snap context */ | 448 | /* verify this is a writeable snap context */ |
446 | snapc = (void *)page->private; | 449 | snapc = page_snap_context(page); |
447 | if (snapc == NULL) { | 450 | if (snapc == NULL) { |
448 | dout("writepage %p page %p not dirty?\n", inode, page); | 451 | dout("writepage %p page %p not dirty?\n", inode, page); |
449 | goto out; | 452 | goto out; |
@@ -451,7 +454,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
451 | oldest = get_oldest_context(inode, &snap_size); | 454 | oldest = get_oldest_context(inode, &snap_size); |
452 | if (snapc->seq > oldest->seq) { | 455 | if (snapc->seq > oldest->seq) { |
453 | dout("writepage %p page %p snapc %p not writeable - noop\n", | 456 | dout("writepage %p page %p snapc %p not writeable - noop\n", |
454 | inode, page, (void *)page->private); | 457 | inode, page, snapc); |
455 | /* we should only noop if called by kswapd */ | 458 | /* we should only noop if called by kswapd */ |
456 | WARN_ON((current->flags & PF_MEMALLOC) == 0); | 459 | WARN_ON((current->flags & PF_MEMALLOC) == 0); |
457 | ceph_put_snap_context(oldest); | 460 | ceph_put_snap_context(oldest); |
@@ -591,7 +594,7 @@ static void writepages_finish(struct ceph_osd_request *req, | |||
591 | clear_bdi_congested(&fsc->backing_dev_info, | 594 | clear_bdi_congested(&fsc->backing_dev_info, |
592 | BLK_RW_ASYNC); | 595 | BLK_RW_ASYNC); |
593 | 596 | ||
594 | ceph_put_snap_context((void *)page->private); | 597 | ceph_put_snap_context(page_snap_context(page)); |
595 | page->private = 0; | 598 | page->private = 0; |
596 | ClearPagePrivate(page); | 599 | ClearPagePrivate(page); |
597 | dout("unlocking %d %p\n", i, page); | 600 | dout("unlocking %d %p\n", i, page); |
@@ -795,7 +798,7 @@ get_more_pages: | |||
795 | } | 798 | } |
796 | 799 | ||
797 | /* only if matching snap context */ | 800 | /* only if matching snap context */ |
798 | pgsnapc = (void *)page->private; | 801 | pgsnapc = page_snap_context(page); |
799 | if (pgsnapc->seq > snapc->seq) { | 802 | if (pgsnapc->seq > snapc->seq) { |
800 | dout("page snapc %p %lld > oldest %p %lld\n", | 803 | dout("page snapc %p %lld > oldest %p %lld\n", |
801 | pgsnapc, pgsnapc->seq, snapc, snapc->seq); | 804 | pgsnapc, pgsnapc->seq, snapc, snapc->seq); |
@@ -984,7 +987,7 @@ retry_locked: | |||
984 | BUG_ON(!ci->i_snap_realm); | 987 | BUG_ON(!ci->i_snap_realm); |
985 | down_read(&mdsc->snap_rwsem); | 988 | down_read(&mdsc->snap_rwsem); |
986 | BUG_ON(!ci->i_snap_realm->cached_context); | 989 | BUG_ON(!ci->i_snap_realm->cached_context); |
987 | snapc = (void *)page->private; | 990 | snapc = page_snap_context(page); |
988 | if (snapc && snapc != ci->i_head_snapc) { | 991 | if (snapc && snapc != ci->i_head_snapc) { |
989 | /* | 992 | /* |
990 | * this page is already dirty in another (older) snap | 993 | * this page is already dirty in another (older) snap |
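The page_snap_context() helper added above exists so callers never read page->private on a page that has no private data attached. A hypothetical call site showing the intended usage (not part of the patch):

/*
 * Hedged example: decide whether a dirty page's snap context is still
 * writeable, going through page_snap_context() instead of casting
 * page->private directly.
 */
static bool ceph_page_snapc_writeable(struct page *page,
				      struct ceph_snap_context *oldest)
{
	struct ceph_snap_context *snapc = page_snap_context(page);

	if (!snapc)		/* no snap context attached to this page */
		return false;
	return snapc->seq <= oldest->seq;
}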
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -819,10 +819,10 @@ static int exec_mmap(struct mm_struct *mm) | |||
819 | /* Notify parent that we're no longer interested in the old VM */ | 819 | /* Notify parent that we're no longer interested in the old VM */ |
820 | tsk = current; | 820 | tsk = current; |
821 | old_mm = current->mm; | 821 | old_mm = current->mm; |
822 | sync_mm_rss(old_mm); | ||
823 | mm_release(tsk, old_mm); | 822 | mm_release(tsk, old_mm); |
824 | 823 | ||
825 | if (old_mm) { | 824 | if (old_mm) { |
825 | sync_mm_rss(old_mm); | ||
826 | /* | 826 | /* |
827 | * Make sure that if there is a core dump in progress | 827 | * Make sure that if there is a core dump in progress |
828 | * for the old mm, we get out and die instead of going | 828 | * for the old mm, we get out and die instead of going |
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index c640ba57074b..09addc8615fa 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -31,6 +31,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags) | |||
31 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); | 31 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); |
32 | struct hfsplus_vh *vh = sbi->s_vhdr; | 32 | struct hfsplus_vh *vh = sbi->s_vhdr; |
33 | struct hfsplus_vh *bvh = sbi->s_backup_vhdr; | 33 | struct hfsplus_vh *bvh = sbi->s_backup_vhdr; |
34 | u32 cnid = (unsigned long)dentry->d_fsdata; | ||
34 | 35 | ||
35 | if (!capable(CAP_SYS_ADMIN)) | 36 | if (!capable(CAP_SYS_ADMIN)) |
36 | return -EPERM; | 37 | return -EPERM; |
@@ -41,8 +42,12 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags) | |||
41 | vh->finder_info[0] = bvh->finder_info[0] = | 42 | vh->finder_info[0] = bvh->finder_info[0] = |
42 | cpu_to_be32(parent_ino(dentry)); | 43 | cpu_to_be32(parent_ino(dentry)); |
43 | 44 | ||
44 | /* Bootloader */ | 45 | /* |
45 | vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino); | 46 | * Bootloader. Just using the inode here breaks in the case of |
47 | * hard links - the firmware wants the ID of the hard link file, | ||
48 | * but the inode points at the indirect inode | ||
49 | */ | ||
50 | vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid); | ||
46 | 51 | ||
47 | /* Per spec, the OS X system folder - same as finder_info[0] here */ | 52 | /* Per spec, the OS X system folder - same as finder_info[0] here */ |
48 | vh->finder_info[5] = bvh->finder_info[5] = | 53 | vh->finder_info[5] = bvh->finder_info[5] = |
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 7daf4b852d1c..90effcccca9a 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, | |||
56 | DECLARE_COMPLETION_ONSTACK(wait); | 56 | DECLARE_COMPLETION_ONSTACK(wait); |
57 | struct bio *bio; | 57 | struct bio *bio; |
58 | int ret = 0; | 58 | int ret = 0; |
59 | unsigned int io_size; | 59 | u64 io_size; |
60 | loff_t start; | 60 | loff_t start; |
61 | int offset; | 61 | int offset; |
62 | 62 | ||
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 17ba6b995659..f005b5bebdc7 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -207,7 +207,6 @@ error_0: | |||
207 | static void nfs4_shutdown_session(struct nfs_client *clp) | 207 | static void nfs4_shutdown_session(struct nfs_client *clp) |
208 | { | 208 | { |
209 | if (nfs4_has_session(clp)) { | 209 | if (nfs4_has_session(clp)) { |
210 | nfs4_deviceid_purge_client(clp); | ||
211 | nfs4_destroy_session(clp->cl_session); | 210 | nfs4_destroy_session(clp->cl_session); |
212 | nfs4_destroy_clientid(clp); | 211 | nfs4_destroy_clientid(clp); |
213 | } | 212 | } |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 3168f6e3d4d4..9a4cbfc85d81 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -490,6 +490,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) | |||
490 | dreq->error = -EIO; | 490 | dreq->error = -EIO; |
491 | spin_unlock(cinfo.lock); | 491 | spin_unlock(cinfo.lock); |
492 | } | 492 | } |
493 | nfs_release_request(req); | ||
493 | } | 494 | } |
494 | nfs_pageio_complete(&desc); | 495 | nfs_pageio_complete(&desc); |
495 | 496 | ||
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b5b86a05059c..864c51e4b400 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -57,6 +57,11 @@ unsigned int nfs_idmap_cache_timeout = 600; | |||
57 | static const struct cred *id_resolver_cache; | 57 | static const struct cred *id_resolver_cache; |
58 | static struct key_type key_type_id_resolver_legacy; | 58 | static struct key_type key_type_id_resolver_legacy; |
59 | 59 | ||
60 | struct idmap { | ||
61 | struct rpc_pipe *idmap_pipe; | ||
62 | struct key_construction *idmap_key_cons; | ||
63 | struct mutex idmap_mutex; | ||
64 | }; | ||
60 | 65 | ||
61 | /** | 66 | /** |
62 | * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields | 67 | * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields |
@@ -310,9 +315,11 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen, | |||
310 | name, namelen, type, data, | 315 | name, namelen, type, data, |
311 | data_size, NULL); | 316 | data_size, NULL); |
312 | if (ret < 0) { | 317 | if (ret < 0) { |
318 | mutex_lock(&idmap->idmap_mutex); | ||
313 | ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, | 319 | ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, |
314 | name, namelen, type, data, | 320 | name, namelen, type, data, |
315 | data_size, idmap); | 321 | data_size, idmap); |
322 | mutex_unlock(&idmap->idmap_mutex); | ||
316 | } | 323 | } |
317 | return ret; | 324 | return ret; |
318 | } | 325 | } |
@@ -354,11 +361,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ | |||
354 | /* idmap classic begins here */ | 361 | /* idmap classic begins here */ |
355 | module_param(nfs_idmap_cache_timeout, int, 0644); | 362 | module_param(nfs_idmap_cache_timeout, int, 0644); |
356 | 363 | ||
357 | struct idmap { | ||
358 | struct rpc_pipe *idmap_pipe; | ||
359 | struct key_construction *idmap_key_cons; | ||
360 | }; | ||
361 | |||
362 | enum { | 364 | enum { |
363 | Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err | 365 | Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err |
364 | }; | 366 | }; |
@@ -469,6 +471,7 @@ nfs_idmap_new(struct nfs_client *clp) | |||
469 | return error; | 471 | return error; |
470 | } | 472 | } |
471 | idmap->idmap_pipe = pipe; | 473 | idmap->idmap_pipe = pipe; |
474 | mutex_init(&idmap->idmap_mutex); | ||
472 | 475 | ||
473 | clp->cl_idmap = idmap; | 476 | clp->cl_idmap = idmap; |
474 | return 0; | 477 | return 0; |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e605d695dbcb..f7296983eba6 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1530,7 +1530,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) | |||
1530 | nfsi->delegation_state = 0; | 1530 | nfsi->delegation_state = 0; |
1531 | init_rwsem(&nfsi->rwsem); | 1531 | init_rwsem(&nfsi->rwsem); |
1532 | nfsi->layout = NULL; | 1532 | nfsi->layout = NULL; |
1533 | atomic_set(&nfsi->commit_info.rpcs_out, 0); | ||
1534 | #endif | 1533 | #endif |
1535 | } | 1534 | } |
1536 | 1535 | ||
@@ -1545,6 +1544,7 @@ static void init_once(void *foo) | |||
1545 | INIT_LIST_HEAD(&nfsi->commit_info.list); | 1544 | INIT_LIST_HEAD(&nfsi->commit_info.list); |
1546 | nfsi->npages = 0; | 1545 | nfsi->npages = 0; |
1547 | nfsi->commit_info.ncommit = 0; | 1546 | nfsi->commit_info.ncommit = 0; |
1547 | atomic_set(&nfsi->commit_info.rpcs_out, 0); | ||
1548 | atomic_set(&nfsi->silly_count, 1); | 1548 | atomic_set(&nfsi->silly_count, 1); |
1549 | INIT_HLIST_HEAD(&nfsi->silly_list); | 1549 | INIT_HLIST_HEAD(&nfsi->silly_list); |
1550 | init_waitqueue_head(&nfsi->waitqueue); | 1550 | init_waitqueue_head(&nfsi->waitqueue); |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index b8323aa7b543..bbc49caa7a82 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -70,6 +70,10 @@ find_pnfs_driver(u32 id) | |||
70 | 70 | ||
71 | spin_lock(&pnfs_spinlock); | 71 | spin_lock(&pnfs_spinlock); |
72 | local = find_pnfs_driver_locked(id); | 72 | local = find_pnfs_driver_locked(id); |
73 | if (local != NULL && !try_module_get(local->owner)) { | ||
74 | dprintk("%s: Could not grab reference on module\n", __func__); | ||
75 | local = NULL; | ||
76 | } | ||
73 | spin_unlock(&pnfs_spinlock); | 77 | spin_unlock(&pnfs_spinlock); |
74 | return local; | 78 | return local; |
75 | } | 79 | } |
@@ -80,6 +84,9 @@ unset_pnfs_layoutdriver(struct nfs_server *nfss) | |||
80 | if (nfss->pnfs_curr_ld) { | 84 | if (nfss->pnfs_curr_ld) { |
81 | if (nfss->pnfs_curr_ld->clear_layoutdriver) | 85 | if (nfss->pnfs_curr_ld->clear_layoutdriver) |
82 | nfss->pnfs_curr_ld->clear_layoutdriver(nfss); | 86 | nfss->pnfs_curr_ld->clear_layoutdriver(nfss); |
87 | /* Decrement the MDS count. Purge the deviceid cache if zero */ | ||
88 | if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count)) | ||
89 | nfs4_deviceid_purge_client(nfss->nfs_client); | ||
83 | module_put(nfss->pnfs_curr_ld->owner); | 90 | module_put(nfss->pnfs_curr_ld->owner); |
84 | } | 91 | } |
85 | nfss->pnfs_curr_ld = NULL; | 92 | nfss->pnfs_curr_ld = NULL; |
@@ -115,10 +122,6 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, | |||
115 | goto out_no_driver; | 122 | goto out_no_driver; |
116 | } | 123 | } |
117 | } | 124 | } |
118 | if (!try_module_get(ld_type->owner)) { | ||
119 | dprintk("%s: Could not grab reference on module\n", __func__); | ||
120 | goto out_no_driver; | ||
121 | } | ||
122 | server->pnfs_curr_ld = ld_type; | 125 | server->pnfs_curr_ld = ld_type; |
123 | if (ld_type->set_layoutdriver | 126 | if (ld_type->set_layoutdriver |
124 | && ld_type->set_layoutdriver(server, mntfh)) { | 127 | && ld_type->set_layoutdriver(server, mntfh)) { |
@@ -127,6 +130,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh, | |||
127 | module_put(ld_type->owner); | 130 | module_put(ld_type->owner); |
128 | goto out_no_driver; | 131 | goto out_no_driver; |
129 | } | 132 | } |
133 | /* Bump the MDS count */ | ||
134 | atomic_inc(&server->nfs_client->cl_mds_count); | ||
130 | 135 | ||
131 | dprintk("%s: pNFS module for %u set\n", __func__, id); | 136 | dprintk("%s: pNFS module for %u set\n", __func__, id); |
132 | return; | 137 | return; |
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 08a07a218d26..57ceaf33d177 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs) | |||
191 | while (!list_empty(head)) { | 191 | while (!list_empty(head)) { |
192 | ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); | 192 | ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); |
193 | list_del_init(&ii->i_dirty); | 193 | list_del_init(&ii->i_dirty); |
194 | truncate_inode_pages(&ii->vfs_inode.i_data, 0); | ||
195 | nilfs_btnode_cache_clear(&ii->i_btnode_cache); | ||
194 | iput(&ii->vfs_inode); | 196 | iput(&ii->vfs_inode); |
195 | } | 197 | } |
196 | } | 198 | } |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 0e72ad6f22aa..88e11fb346b6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) | |||
2309 | if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) | 2309 | if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) |
2310 | continue; | 2310 | continue; |
2311 | list_del_init(&ii->i_dirty); | 2311 | list_del_init(&ii->i_dirty); |
2312 | truncate_inode_pages(&ii->vfs_inode.i_data, 0); | ||
2313 | nilfs_btnode_cache_clear(&ii->i_btnode_cache); | ||
2312 | iput(&ii->vfs_inode); | 2314 | iput(&ii->vfs_inode); |
2313 | } | 2315 | } |
2314 | } | 2316 | } |
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index aeb19e68e086..11a2aa2a56c4 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -258,7 +258,7 @@ fail: | |||
258 | return rc; | 258 | return rc; |
259 | } | 259 | } |
260 | 260 | ||
261 | int pstore_fill_super(struct super_block *sb, void *data, int silent) | 261 | static int pstore_fill_super(struct super_block *sb, void *data, int silent) |
262 | { | 262 | { |
263 | struct inode *inode; | 263 | struct inode *inode; |
264 | 264 | ||
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 82c585f715e3..03ce7a9b81cc 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -94,20 +94,15 @@ static const char *get_reason_str(enum kmsg_dump_reason reason) | |||
94 | * as we can from the end of the buffer. | 94 | * as we can from the end of the buffer. |
95 | */ | 95 | */ |
96 | static void pstore_dump(struct kmsg_dumper *dumper, | 96 | static void pstore_dump(struct kmsg_dumper *dumper, |
97 | enum kmsg_dump_reason reason, | 97 | enum kmsg_dump_reason reason) |
98 | const char *s1, unsigned long l1, | ||
99 | const char *s2, unsigned long l2) | ||
100 | { | 98 | { |
101 | unsigned long s1_start, s2_start; | 99 | unsigned long total = 0; |
102 | unsigned long l1_cpy, l2_cpy; | ||
103 | unsigned long size, total = 0; | ||
104 | char *dst; | ||
105 | const char *why; | 100 | const char *why; |
106 | u64 id; | 101 | u64 id; |
107 | int hsize, ret; | ||
108 | unsigned int part = 1; | 102 | unsigned int part = 1; |
109 | unsigned long flags = 0; | 103 | unsigned long flags = 0; |
110 | int is_locked = 0; | 104 | int is_locked = 0; |
105 | int ret; | ||
111 | 106 | ||
112 | why = get_reason_str(reason); | 107 | why = get_reason_str(reason); |
113 | 108 | ||
@@ -119,30 +114,25 @@ static void pstore_dump(struct kmsg_dumper *dumper, | |||
119 | spin_lock_irqsave(&psinfo->buf_lock, flags); | 114 | spin_lock_irqsave(&psinfo->buf_lock, flags); |
120 | oopscount++; | 115 | oopscount++; |
121 | while (total < kmsg_bytes) { | 116 | while (total < kmsg_bytes) { |
117 | char *dst; | ||
118 | unsigned long size; | ||
119 | int hsize; | ||
120 | size_t len; | ||
121 | |||
122 | dst = psinfo->buf; | 122 | dst = psinfo->buf; |
123 | hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); | 123 | hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); |
124 | size = psinfo->bufsize - hsize; | 124 | size = psinfo->bufsize - hsize; |
125 | dst += hsize; | 125 | dst += hsize; |
126 | 126 | ||
127 | l2_cpy = min(l2, size); | 127 | if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len)) |
128 | l1_cpy = min(l1, size - l2_cpy); | ||
129 | |||
130 | if (l1_cpy + l2_cpy == 0) | ||
131 | break; | 128 | break; |
132 | 129 | ||
133 | s2_start = l2 - l2_cpy; | ||
134 | s1_start = l1 - l1_cpy; | ||
135 | |||
136 | memcpy(dst, s1 + s1_start, l1_cpy); | ||
137 | memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); | ||
138 | |||
139 | ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, | 130 | ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, |
140 | hsize + l1_cpy + l2_cpy, psinfo); | 131 | hsize + len, psinfo); |
141 | if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) | 132 | if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) |
142 | pstore_new_entry = 1; | 133 | pstore_new_entry = 1; |
143 | l1 -= l1_cpy; | 134 | |
144 | l2 -= l2_cpy; | 135 | total += hsize + len; |
145 | total += l1_cpy + l2_cpy; | ||
146 | part++; | 136 | part++; |
147 | } | 137 | } |
148 | if (in_nmi()) { | 138 | if (in_nmi()) { |
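The rewrite above moves pstore to the pull-style kmsg_dump interface: the dumper calls kmsg_dump_get_buffer() in a loop instead of being handed two string segments. A minimal, self-contained dumper using the same calls (hypothetical names and buffer size; an illustration, not the pstore code):

#include <linux/kmsg_dump.h>
#include <linux/printk.h>

static char my_dump_buf[4096];

/* Grab as much of the tail of the kernel log as fits in my_dump_buf. */
static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	size_t len;

	if (!kmsg_dump_get_buffer(dumper, true, my_dump_buf,
				  sizeof(my_dump_buf), &len))
		return;		/* no (more) records available */
	pr_info("captured %zu bytes of kernel log (reason %d)\n", len, reason);
}

static struct kmsg_dumper my_dumper = {
	.dump = my_dump,
};

static int __init my_dumper_init(void)
{
	return kmsg_dump_register(&my_dumper);
}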
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 9123cce28c1e..453030f9c5bc 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -106,6 +106,8 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type, | |||
106 | time->tv_sec = 0; | 106 | time->tv_sec = 0; |
107 | time->tv_nsec = 0; | 107 | time->tv_nsec = 0; |
108 | 108 | ||
109 | /* Update old/shadowed buffer. */ | ||
110 | persistent_ram_save_old(prz); | ||
109 | size = persistent_ram_old_size(prz); | 111 | size = persistent_ram_old_size(prz); |
110 | *buf = kmalloc(size, GFP_KERNEL); | 112 | *buf = kmalloc(size, GFP_KERNEL); |
111 | if (*buf == NULL) | 113 | if (*buf == NULL) |
@@ -184,6 +186,7 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, | |||
184 | return -EINVAL; | 186 | return -EINVAL; |
185 | 187 | ||
186 | persistent_ram_free_old(cxt->przs[id]); | 188 | persistent_ram_free_old(cxt->przs[id]); |
189 | persistent_ram_zap(cxt->przs[id]); | ||
187 | 190 | ||
188 | return 0; | 191 | return 0; |
189 | } | 192 | } |
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 31f8d184f3a0..c5fbdbbf81ac 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -250,23 +250,24 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz, | |||
250 | persistent_ram_update_ecc(prz, start, count); | 250 | persistent_ram_update_ecc(prz, start, count); |
251 | } | 251 | } |
252 | 252 | ||
253 | static void __init | 253 | void persistent_ram_save_old(struct persistent_ram_zone *prz) |
254 | persistent_ram_save_old(struct persistent_ram_zone *prz) | ||
255 | { | 254 | { |
256 | struct persistent_ram_buffer *buffer = prz->buffer; | 255 | struct persistent_ram_buffer *buffer = prz->buffer; |
257 | size_t size = buffer_size(prz); | 256 | size_t size = buffer_size(prz); |
258 | size_t start = buffer_start(prz); | 257 | size_t start = buffer_start(prz); |
259 | char *dest; | ||
260 | 258 | ||
261 | persistent_ram_ecc_old(prz); | 259 | if (!size) |
260 | return; | ||
262 | 261 | ||
263 | dest = kmalloc(size, GFP_KERNEL); | 262 | if (!prz->old_log) { |
264 | if (dest == NULL) { | 263 | persistent_ram_ecc_old(prz); |
264 | prz->old_log = kmalloc(size, GFP_KERNEL); | ||
265 | } | ||
266 | if (!prz->old_log) { | ||
265 | pr_err("persistent_ram: failed to allocate buffer\n"); | 267 | pr_err("persistent_ram: failed to allocate buffer\n"); |
266 | return; | 268 | return; |
267 | } | 269 | } |
268 | 270 | ||
269 | prz->old_log = dest; | ||
270 | prz->old_log_size = size; | 271 | prz->old_log_size = size; |
271 | memcpy(prz->old_log, &buffer->data[start], size - start); | 272 | memcpy(prz->old_log, &buffer->data[start], size - start); |
272 | memcpy(prz->old_log + size - start, &buffer->data[0], start); | 273 | memcpy(prz->old_log + size - start, &buffer->data[0], start); |
@@ -319,6 +320,13 @@ void persistent_ram_free_old(struct persistent_ram_zone *prz) | |||
319 | prz->old_log_size = 0; | 320 | prz->old_log_size = 0; |
320 | } | 321 | } |
321 | 322 | ||
323 | void persistent_ram_zap(struct persistent_ram_zone *prz) | ||
324 | { | ||
325 | atomic_set(&prz->buffer->start, 0); | ||
326 | atomic_set(&prz->buffer->size, 0); | ||
327 | persistent_ram_update_header_ecc(prz); | ||
328 | } | ||
329 | |||
322 | static void *persistent_ram_vmap(phys_addr_t start, size_t size) | 330 | static void *persistent_ram_vmap(phys_addr_t start, size_t size) |
323 | { | 331 | { |
324 | struct page **pages; | 332 | struct page **pages; |
@@ -405,6 +413,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool | |||
405 | " size %zu, start %zu\n", | 413 | " size %zu, start %zu\n", |
406 | buffer_size(prz), buffer_start(prz)); | 414 | buffer_size(prz), buffer_start(prz)); |
407 | persistent_ram_save_old(prz); | 415 | persistent_ram_save_old(prz); |
416 | return 0; | ||
408 | } | 417 | } |
409 | } else { | 418 | } else { |
410 | pr_info("persistent_ram: no valid data in buffer" | 419 | pr_info("persistent_ram: no valid data in buffer" |
@@ -412,8 +421,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool | |||
412 | } | 421 | } |
413 | 422 | ||
414 | prz->buffer->sig = PERSISTENT_RAM_SIG; | 423 | prz->buffer->sig = PERSISTENT_RAM_SIG; |
415 | atomic_set(&prz->buffer->start, 0); | 424 | persistent_ram_zap(prz); |
416 | atomic_set(&prz->buffer->size, 0); | ||
417 | 425 | ||
418 | return 0; | 426 | return 0; |
419 | } | 427 | } |
@@ -448,7 +456,6 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start, | |||
448 | goto err; | 456 | goto err; |
449 | 457 | ||
450 | persistent_ram_post_init(prz, ecc); | 458 | persistent_ram_post_init(prz, ecc); |
451 | persistent_ram_update_header_ecc(prz); | ||
452 | 459 | ||
453 | return prz; | 460 | return prz; |
454 | err: | 461 | err: |
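With persistent_ram_save_old() now exported and persistent_ram_zap() added, the ramoops read and erase paths above pair the helpers roughly as follows (hedged sketch using the names from the patch; the real driver code around it is omitted):

/* Hedged sketch: snapshot a zone's contents, then reset it for reuse. */
static void snapshot_then_reset(struct persistent_ram_zone *prz)
{
	size_t size;

	persistent_ram_save_old(prz);		/* refresh the shadow copy */
	size = persistent_ram_old_size(prz);	/* length of the saved log */
	pr_debug("saved %zu bytes of old log\n", size);
	/* ... copy prz->old_log out and hand it to pstore here ... */

	persistent_ram_free_old(prz);		/* drop the shadow copy */
	persistent_ram_zap(prz);		/* reset buffer and header ECC */
}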
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 84a7e6f3c046..92df3b081539 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2918,7 +2918,7 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) | |||
2918 | struct dentry *dent; | 2918 | struct dentry *dent; |
2919 | struct ubifs_debug_info *d = c->dbg; | 2919 | struct ubifs_debug_info *d = c->dbg; |
2920 | 2920 | ||
2921 | if (!IS_ENABLED(DEBUG_FS)) | 2921 | if (!IS_ENABLED(CONFIG_DEBUG_FS)) |
2922 | return 0; | 2922 | return 0; |
2923 | 2923 | ||
2924 | n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, | 2924 | n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, |
@@ -3013,7 +3013,7 @@ out: | |||
3013 | */ | 3013 | */ |
3014 | void dbg_debugfs_exit_fs(struct ubifs_info *c) | 3014 | void dbg_debugfs_exit_fs(struct ubifs_info *c) |
3015 | { | 3015 | { |
3016 | if (IS_ENABLED(DEBUG_FS)) | 3016 | if (IS_ENABLED(CONFIG_DEBUG_FS)) |
3017 | debugfs_remove_recursive(c->dbg->dfs_dir); | 3017 | debugfs_remove_recursive(c->dbg->dfs_dir); |
3018 | } | 3018 | } |
3019 | 3019 | ||
@@ -3099,7 +3099,7 @@ int dbg_debugfs_init(void) | |||
3099 | const char *fname; | 3099 | const char *fname; |
3100 | struct dentry *dent; | 3100 | struct dentry *dent; |
3101 | 3101 | ||
3102 | if (!IS_ENABLED(DEBUG_FS)) | 3102 | if (!IS_ENABLED(CONFIG_DEBUG_FS)) |
3103 | return 0; | 3103 | return 0; |
3104 | 3104 | ||
3105 | fname = "ubifs"; | 3105 | fname = "ubifs"; |
@@ -3166,7 +3166,7 @@ out: | |||
3166 | */ | 3166 | */ |
3167 | void dbg_debugfs_exit(void) | 3167 | void dbg_debugfs_exit(void) |
3168 | { | 3168 | { |
3169 | if (IS_ENABLED(DEBUG_FS)) | 3169 | if (IS_ENABLED(CONFIG_DEBUG_FS)) |
3170 | debugfs_remove_recursive(dfs_rootdir); | 3170 | debugfs_remove_recursive(dfs_rootdir); |
3171 | } | 3171 | } |
3172 | 3172 | ||
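The s/DEBUG_FS/CONFIG_DEBUG_FS/ fix above matters because IS_ENABLED() only understands full CONFIG_ symbols; a bare, undefined name silently evaluates to 0, so the old init checks always bailed out and the debugfs files were never created. A small illustration of the corrected pattern (hypothetical function, not from the patch):

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/kconfig.h>

static struct dentry *my_dfs_dir;

/*
 * IS_ENABLED(CONFIG_DEBUG_FS) expands to 1 when debugfs is built in and
 * to 0 otherwise, so this branch is resolved at compile time.
 */
static int my_debugfs_init(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;			/* debugfs compiled out */

	my_dfs_dir = debugfs_create_dir("my_fs", NULL);
	return my_dfs_dir ? 0 : -ENOMEM;
}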
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 2559d174e004..28ec13af28d9 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -939,8 +939,8 @@ static int find_dirtiest_idx_leb(struct ubifs_info *c) | |||
939 | } | 939 | } |
940 | dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, | 940 | dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, |
941 | lp->free, lp->flags); | 941 | lp->free, lp->flags); |
942 | ubifs_assert(lp->flags | LPROPS_TAKEN); | 942 | ubifs_assert(lp->flags & LPROPS_TAKEN); |
943 | ubifs_assert(lp->flags | LPROPS_INDEX); | 943 | ubifs_assert(lp->flags & LPROPS_INDEX); |
944 | return lnum; | 944 | return lnum; |
945 | } | 945 | } |
946 | 946 | ||
diff --git a/fs/udf/super.c b/fs/udf/super.c
index ac8a348dcb69..8d86a8706c0e 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/seq_file.h> | 56 | #include <linux/seq_file.h> |
57 | #include <linux/bitmap.h> | 57 | #include <linux/bitmap.h> |
58 | #include <linux/crc-itu-t.h> | 58 | #include <linux/crc-itu-t.h> |
59 | #include <linux/log2.h> | ||
59 | #include <asm/byteorder.h> | 60 | #include <asm/byteorder.h> |
60 | 61 | ||
61 | #include "udf_sb.h" | 62 | #include "udf_sb.h" |
@@ -1215,16 +1216,65 @@ out_bh: | |||
1215 | return ret; | 1216 | return ret; |
1216 | } | 1217 | } |
1217 | 1218 | ||
1219 | static int udf_load_sparable_map(struct super_block *sb, | ||
1220 | struct udf_part_map *map, | ||
1221 | struct sparablePartitionMap *spm) | ||
1222 | { | ||
1223 | uint32_t loc; | ||
1224 | uint16_t ident; | ||
1225 | struct sparingTable *st; | ||
1226 | struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing; | ||
1227 | int i; | ||
1228 | struct buffer_head *bh; | ||
1229 | |||
1230 | map->s_partition_type = UDF_SPARABLE_MAP15; | ||
1231 | sdata->s_packet_len = le16_to_cpu(spm->packetLength); | ||
1232 | if (!is_power_of_2(sdata->s_packet_len)) { | ||
1233 | udf_err(sb, "error loading logical volume descriptor: " | ||
1234 | "Invalid packet length %u\n", | ||
1235 | (unsigned)sdata->s_packet_len); | ||
1236 | return -EIO; | ||
1237 | } | ||
1238 | if (spm->numSparingTables > 4) { | ||
1239 | udf_err(sb, "error loading logical volume descriptor: " | ||
1240 | "Too many sparing tables (%d)\n", | ||
1241 | (int)spm->numSparingTables); | ||
1242 | return -EIO; | ||
1243 | } | ||
1244 | |||
1245 | for (i = 0; i < spm->numSparingTables; i++) { | ||
1246 | loc = le32_to_cpu(spm->locSparingTable[i]); | ||
1247 | bh = udf_read_tagged(sb, loc, loc, &ident); | ||
1248 | if (!bh) | ||
1249 | continue; | ||
1250 | |||
1251 | st = (struct sparingTable *)bh->b_data; | ||
1252 | if (ident != 0 || | ||
1253 | strncmp(st->sparingIdent.ident, UDF_ID_SPARING, | ||
1254 | strlen(UDF_ID_SPARING)) || | ||
1255 | sizeof(*st) + le16_to_cpu(st->reallocationTableLen) > | ||
1256 | sb->s_blocksize) { | ||
1257 | brelse(bh); | ||
1258 | continue; | ||
1259 | } | ||
1260 | |||
1261 | sdata->s_spar_map[i] = bh; | ||
1262 | } | ||
1263 | map->s_partition_func = udf_get_pblock_spar15; | ||
1264 | return 0; | ||
1265 | } | ||
1266 | |||
1218 | static int udf_load_logicalvol(struct super_block *sb, sector_t block, | 1267 | static int udf_load_logicalvol(struct super_block *sb, sector_t block, |
1219 | struct kernel_lb_addr *fileset) | 1268 | struct kernel_lb_addr *fileset) |
1220 | { | 1269 | { |
1221 | struct logicalVolDesc *lvd; | 1270 | struct logicalVolDesc *lvd; |
1222 | int i, j, offset; | 1271 | int i, offset; |
1223 | uint8_t type; | 1272 | uint8_t type; |
1224 | struct udf_sb_info *sbi = UDF_SB(sb); | 1273 | struct udf_sb_info *sbi = UDF_SB(sb); |
1225 | struct genericPartitionMap *gpm; | 1274 | struct genericPartitionMap *gpm; |
1226 | uint16_t ident; | 1275 | uint16_t ident; |
1227 | struct buffer_head *bh; | 1276 | struct buffer_head *bh; |
1277 | unsigned int table_len; | ||
1228 | int ret = 0; | 1278 | int ret = 0; |
1229 | 1279 | ||
1230 | bh = udf_read_tagged(sb, block, block, &ident); | 1280 | bh = udf_read_tagged(sb, block, block, &ident); |
@@ -1232,15 +1282,20 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, | |||
1232 | return 1; | 1282 | return 1; |
1233 | BUG_ON(ident != TAG_IDENT_LVD); | 1283 | BUG_ON(ident != TAG_IDENT_LVD); |
1234 | lvd = (struct logicalVolDesc *)bh->b_data; | 1284 | lvd = (struct logicalVolDesc *)bh->b_data; |
1235 | 1285 | table_len = le32_to_cpu(lvd->mapTableLength); | |
1236 | i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); | 1286 | if (sizeof(*lvd) + table_len > sb->s_blocksize) { |
1237 | if (i != 0) { | 1287 | udf_err(sb, "error loading logical volume descriptor: " |
1238 | ret = i; | 1288 | "Partition table too long (%u > %lu)\n", table_len, |
1289 | sb->s_blocksize - sizeof(*lvd)); | ||
1239 | goto out_bh; | 1290 | goto out_bh; |
1240 | } | 1291 | } |
1241 | 1292 | ||
1293 | ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); | ||
1294 | if (ret) | ||
1295 | goto out_bh; | ||
1296 | |||
1242 | for (i = 0, offset = 0; | 1297 | for (i = 0, offset = 0; |
1243 | i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength); | 1298 | i < sbi->s_partitions && offset < table_len; |
1244 | i++, offset += gpm->partitionMapLength) { | 1299 | i++, offset += gpm->partitionMapLength) { |
1245 | struct udf_part_map *map = &sbi->s_partmaps[i]; | 1300 | struct udf_part_map *map = &sbi->s_partmaps[i]; |
1246 | gpm = (struct genericPartitionMap *) | 1301 | gpm = (struct genericPartitionMap *) |
@@ -1275,38 +1330,9 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, | |||
1275 | } else if (!strncmp(upm2->partIdent.ident, | 1330 | } else if (!strncmp(upm2->partIdent.ident, |
1276 | UDF_ID_SPARABLE, | 1331 | UDF_ID_SPARABLE, |
1277 | strlen(UDF_ID_SPARABLE))) { | 1332 | strlen(UDF_ID_SPARABLE))) { |
1278 | uint32_t loc; | 1333 | if (udf_load_sparable_map(sb, map, |
1279 | struct sparingTable *st; | 1334 | (struct sparablePartitionMap *)gpm) < 0) |
1280 | struct sparablePartitionMap *spm = | 1335 | goto out_bh; |
1281 | (struct sparablePartitionMap *)gpm; | ||
1282 | |||
1283 | map->s_partition_type = UDF_SPARABLE_MAP15; | ||
1284 | map->s_type_specific.s_sparing.s_packet_len = | ||
1285 | le16_to_cpu(spm->packetLength); | ||
1286 | for (j = 0; j < spm->numSparingTables; j++) { | ||
1287 | struct buffer_head *bh2; | ||
1288 | |||
1289 | loc = le32_to_cpu( | ||
1290 | spm->locSparingTable[j]); | ||
1291 | bh2 = udf_read_tagged(sb, loc, loc, | ||
1292 | &ident); | ||
1293 | map->s_type_specific.s_sparing. | ||
1294 | s_spar_map[j] = bh2; | ||
1295 | |||
1296 | if (bh2 == NULL) | ||
1297 | continue; | ||
1298 | |||
1299 | st = (struct sparingTable *)bh2->b_data; | ||
1300 | if (ident != 0 || strncmp( | ||
1301 | st->sparingIdent.ident, | ||
1302 | UDF_ID_SPARING, | ||
1303 | strlen(UDF_ID_SPARING))) { | ||
1304 | brelse(bh2); | ||
1305 | map->s_type_specific.s_sparing. | ||
1306 | s_spar_map[j] = NULL; | ||
1307 | } | ||
1308 | } | ||
1309 | map->s_partition_func = udf_get_pblock_spar15; | ||
1310 | } else if (!strncmp(upm2->partIdent.ident, | 1336 | } else if (!strncmp(upm2->partIdent.ident, |
1311 | UDF_ID_METADATA, | 1337 | UDF_ID_METADATA, |
1312 | strlen(UDF_ID_METADATA))) { | 1338 | strlen(UDF_ID_METADATA))) { |
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 229641fb8e67..9d1aeb7e2734 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1080,6 +1080,7 @@ restart: | |||
1080 | goto restart; | 1080 | goto restart; |
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); | ||
1083 | trace_xfs_alloc_size_neither(args); | 1084 | trace_xfs_alloc_size_neither(args); |
1084 | args->agbno = NULLAGBLOCK; | 1085 | args->agbno = NULLAGBLOCK; |
1085 | return 0; | 1086 | return 0; |
@@ -2441,7 +2442,7 @@ xfs_alloc_vextent( | |||
2441 | DECLARE_COMPLETION_ONSTACK(done); | 2442 | DECLARE_COMPLETION_ONSTACK(done); |
2442 | 2443 | ||
2443 | args->done = &done; | 2444 | args->done = &done; |
2444 | INIT_WORK(&args->work, xfs_alloc_vextent_worker); | 2445 | INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker); |
2445 | queue_work(xfs_alloc_wq, &args->work); | 2446 | queue_work(xfs_alloc_wq, &args->work); |
2446 | wait_for_completion(&done); | 2447 | wait_for_completion(&done); |
2447 | return args->result; | 2448 | return args->result; |
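The switch to INIT_WORK_ONSTACK() matters because the work item in xfs_alloc_vextent() lives on the caller's stack; the _ONSTACK variants tell the work and completion debug-object code that the objects are neither static nor heap-allocated. A generic sketch of the same defer-and-wait pattern (hypothetical names, not the XFS code):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct defer_args {
	struct work_struct	work;
	struct completion	*done;
	int			result;
};

static void defer_worker(struct work_struct *work)
{
	struct defer_args *args = container_of(work, struct defer_args, work);

	args->result = 0;		/* do the real work here */
	complete(args->done);
}

static int run_deferred(struct workqueue_struct *wq)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct defer_args args = { .done = &done };

	INIT_WORK_ONSTACK(&args.work, defer_worker);	/* stack-allocated work item */
	queue_work(wq, &args.work);
	wait_for_completion(&done);
	return args.result;
}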
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index ae31c313a79e..8dad722c0041 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -981,10 +981,15 @@ xfs_vm_writepage( | |||
981 | imap_valid = 0; | 981 | imap_valid = 0; |
982 | } | 982 | } |
983 | } else { | 983 | } else { |
984 | if (PageUptodate(page)) { | 984 | if (PageUptodate(page)) |
985 | ASSERT(buffer_mapped(bh)); | 985 | ASSERT(buffer_mapped(bh)); |
986 | imap_valid = 0; | 986 | /* |
987 | } | 987 | * This buffer is not uptodate and will not be |
988 | * written to disk. Ensure that we will put any | ||
989 | * subsequent writeable buffers into a new | ||
990 | * ioend. | ||
991 | */ | ||
992 | imap_valid = 0; | ||
988 | continue; | 993 | continue; |
989 | } | 994 | } |
990 | 995 | ||
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 172d3cc8f8cb..a4beb421018a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -201,14 +201,7 @@ xfs_buf_alloc( | |||
201 | bp->b_length = numblks; | 201 | bp->b_length = numblks; |
202 | bp->b_io_length = numblks; | 202 | bp->b_io_length = numblks; |
203 | bp->b_flags = flags; | 203 | bp->b_flags = flags; |
204 | 204 | bp->b_bn = blkno; | |
205 | /* | ||
206 | * We do not set the block number here in the buffer because we have not | ||
207 | * finished initialising the buffer. We insert the buffer into the cache | ||
208 | * in this state, so this ensures that we are unable to do IO on a | ||
209 | * buffer that hasn't been fully initialised. | ||
210 | */ | ||
211 | bp->b_bn = XFS_BUF_DADDR_NULL; | ||
212 | atomic_set(&bp->b_pin_count, 0); | 205 | atomic_set(&bp->b_pin_count, 0); |
213 | init_waitqueue_head(&bp->b_waiters); | 206 | init_waitqueue_head(&bp->b_waiters); |
214 | 207 | ||
@@ -567,11 +560,6 @@ xfs_buf_get( | |||
567 | if (bp != new_bp) | 560 | if (bp != new_bp) |
568 | xfs_buf_free(new_bp); | 561 | xfs_buf_free(new_bp); |
569 | 562 | ||
570 | /* | ||
571 | * Now we have a workable buffer, fill in the block number so | ||
572 | * that we can do IO on it. | ||
573 | */ | ||
574 | bp->b_bn = blkno; | ||
575 | bp->b_io_length = bp->b_length; | 563 | bp->b_io_length = bp->b_length; |
576 | 564 | ||
577 | found: | 565 | found: |
@@ -772,7 +760,7 @@ xfs_buf_get_uncached( | |||
772 | int error, i; | 760 | int error, i; |
773 | xfs_buf_t *bp; | 761 | xfs_buf_t *bp; |
774 | 762 | ||
775 | bp = xfs_buf_alloc(target, 0, numblks, 0); | 763 | bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0); |
776 | if (unlikely(bp == NULL)) | 764 | if (unlikely(bp == NULL)) |
777 | goto fail; | 765 | goto fail; |
778 | 766 | ||
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6cdbf90c6f7b..d041d47d9d86 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -505,6 +505,14 @@ xfs_inode_item_push( | |||
505 | } | 505 | } |
506 | 506 | ||
507 | /* | 507 | /* |
508 | * Stale inode items should force out the iclog. | ||
509 | */ | ||
510 | if (ip->i_flags & XFS_ISTALE) { | ||
511 | rval = XFS_ITEM_PINNED; | ||
512 | goto out_unlock; | ||
513 | } | ||
514 | |||
515 | /* | ||
508 | * Someone else is already flushing the inode. Nothing we can do | 516 | * Someone else is already flushing the inode. Nothing we can do |
509 | * here but wait for the flush to finish and remove the item from | 517 | * here but wait for the flush to finish and remove the item from |
510 | * the AIL. | 518 | * the AIL. |
@@ -514,15 +522,6 @@ xfs_inode_item_push( | |||
514 | goto out_unlock; | 522 | goto out_unlock; |
515 | } | 523 | } |
516 | 524 | ||
517 | /* | ||
518 | * Stale inode items should force out the iclog. | ||
519 | */ | ||
520 | if (ip->i_flags & XFS_ISTALE) { | ||
521 | xfs_ifunlock(ip); | ||
522 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
523 | return XFS_ITEM_PINNED; | ||
524 | } | ||
525 | |||
526 | ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); | 525 | ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); |
527 | ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); | 526 | ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); |
528 | 527 | ||
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f30d9807dc48..d90d4a388609 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -38,13 +38,21 @@ | |||
38 | kmem_zone_t *xfs_log_ticket_zone; | 38 | kmem_zone_t *xfs_log_ticket_zone; |
39 | 39 | ||
40 | /* Local miscellaneous function prototypes */ | 40 | /* Local miscellaneous function prototypes */ |
41 | STATIC int xlog_commit_record(struct log *log, struct xlog_ticket *ticket, | 41 | STATIC int |
42 | xlog_in_core_t **, xfs_lsn_t *); | 42 | xlog_commit_record( |
43 | struct xlog *log, | ||
44 | struct xlog_ticket *ticket, | ||
45 | struct xlog_in_core **iclog, | ||
46 | xfs_lsn_t *commitlsnp); | ||
47 | |||
43 | STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, | 48 | STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, |
44 | xfs_buftarg_t *log_target, | 49 | xfs_buftarg_t *log_target, |
45 | xfs_daddr_t blk_offset, | 50 | xfs_daddr_t blk_offset, |
46 | int num_bblks); | 51 | int num_bblks); |
47 | STATIC int xlog_space_left(struct log *log, atomic64_t *head); | 52 | STATIC int |
53 | xlog_space_left( | ||
54 | struct xlog *log, | ||
55 | atomic64_t *head); | ||
48 | STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); | 56 | STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); |
49 | STATIC void xlog_dealloc_log(xlog_t *log); | 57 | STATIC void xlog_dealloc_log(xlog_t *log); |
50 | 58 | ||
@@ -64,8 +72,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t *log, | |||
64 | int eventual_size); | 72 | int eventual_size); |
65 | STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); | 73 | STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); |
66 | 74 | ||
67 | STATIC void xlog_grant_push_ail(struct log *log, | 75 | STATIC void |
68 | int need_bytes); | 76 | xlog_grant_push_ail( |
77 | struct xlog *log, | ||
78 | int need_bytes); | ||
69 | STATIC void xlog_regrant_reserve_log_space(xlog_t *log, | 79 | STATIC void xlog_regrant_reserve_log_space(xlog_t *log, |
70 | xlog_ticket_t *ticket); | 80 | xlog_ticket_t *ticket); |
71 | STATIC void xlog_ungrant_log_space(xlog_t *log, | 81 | STATIC void xlog_ungrant_log_space(xlog_t *log, |
@@ -73,7 +83,9 @@ STATIC void xlog_ungrant_log_space(xlog_t *log, | |||
73 | 83 | ||
74 | #if defined(DEBUG) | 84 | #if defined(DEBUG) |
75 | STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); | 85 | STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); |
76 | STATIC void xlog_verify_grant_tail(struct log *log); | 86 | STATIC void |
87 | xlog_verify_grant_tail( | ||
88 | struct xlog *log); | ||
77 | STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, | 89 | STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, |
78 | int count, boolean_t syncing); | 90 | int count, boolean_t syncing); |
79 | STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, | 91 | STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, |
@@ -89,9 +101,9 @@ STATIC int xlog_iclogs_empty(xlog_t *log); | |||
89 | 101 | ||
90 | static void | 102 | static void |
91 | xlog_grant_sub_space( | 103 | xlog_grant_sub_space( |
92 | struct log *log, | 104 | struct xlog *log, |
93 | atomic64_t *head, | 105 | atomic64_t *head, |
94 | int bytes) | 106 | int bytes) |
95 | { | 107 | { |
96 | int64_t head_val = atomic64_read(head); | 108 | int64_t head_val = atomic64_read(head); |
97 | int64_t new, old; | 109 | int64_t new, old; |
@@ -115,9 +127,9 @@ xlog_grant_sub_space( | |||
115 | 127 | ||
116 | static void | 128 | static void |
117 | xlog_grant_add_space( | 129 | xlog_grant_add_space( |
118 | struct log *log, | 130 | struct xlog *log, |
119 | atomic64_t *head, | 131 | atomic64_t *head, |
120 | int bytes) | 132 | int bytes) |
121 | { | 133 | { |
122 | int64_t head_val = atomic64_read(head); | 134 | int64_t head_val = atomic64_read(head); |
123 | int64_t new, old; | 135 | int64_t new, old; |
@@ -165,7 +177,7 @@ xlog_grant_head_wake_all( | |||
165 | 177 | ||
166 | static inline int | 178 | static inline int |
167 | xlog_ticket_reservation( | 179 | xlog_ticket_reservation( |
168 | struct log *log, | 180 | struct xlog *log, |
169 | struct xlog_grant_head *head, | 181 | struct xlog_grant_head *head, |
170 | struct xlog_ticket *tic) | 182 | struct xlog_ticket *tic) |
171 | { | 183 | { |
@@ -182,7 +194,7 @@ xlog_ticket_reservation( | |||
182 | 194 | ||
183 | STATIC bool | 195 | STATIC bool |
184 | xlog_grant_head_wake( | 196 | xlog_grant_head_wake( |
185 | struct log *log, | 197 | struct xlog *log, |
186 | struct xlog_grant_head *head, | 198 | struct xlog_grant_head *head, |
187 | int *free_bytes) | 199 | int *free_bytes) |
188 | { | 200 | { |
@@ -204,7 +216,7 @@ xlog_grant_head_wake( | |||
204 | 216 | ||
205 | STATIC int | 217 | STATIC int |
206 | xlog_grant_head_wait( | 218 | xlog_grant_head_wait( |
207 | struct log *log, | 219 | struct xlog *log, |
208 | struct xlog_grant_head *head, | 220 | struct xlog_grant_head *head, |
209 | struct xlog_ticket *tic, | 221 | struct xlog_ticket *tic, |
210 | int need_bytes) | 222 | int need_bytes) |
@@ -256,7 +268,7 @@ shutdown: | |||
256 | */ | 268 | */ |
257 | STATIC int | 269 | STATIC int |
258 | xlog_grant_head_check( | 270 | xlog_grant_head_check( |
259 | struct log *log, | 271 | struct xlog *log, |
260 | struct xlog_grant_head *head, | 272 | struct xlog_grant_head *head, |
261 | struct xlog_ticket *tic, | 273 | struct xlog_ticket *tic, |
262 | int *need_bytes) | 274 | int *need_bytes) |
@@ -323,7 +335,7 @@ xfs_log_regrant( | |||
323 | struct xfs_mount *mp, | 335 | struct xfs_mount *mp, |
324 | struct xlog_ticket *tic) | 336 | struct xlog_ticket *tic) |
325 | { | 337 | { |
326 | struct log *log = mp->m_log; | 338 | struct xlog *log = mp->m_log; |
327 | int need_bytes; | 339 | int need_bytes; |
328 | int error = 0; | 340 | int error = 0; |
329 | 341 | ||
@@ -389,7 +401,7 @@ xfs_log_reserve( | |||
389 | bool permanent, | 401 | bool permanent, |
390 | uint t_type) | 402 | uint t_type) |
391 | { | 403 | { |
392 | struct log *log = mp->m_log; | 404 | struct xlog *log = mp->m_log; |
393 | struct xlog_ticket *tic; | 405 | struct xlog_ticket *tic; |
394 | int need_bytes; | 406 | int need_bytes; |
395 | int error = 0; | 407 | int error = 0; |
@@ -465,7 +477,7 @@ xfs_log_done( | |||
465 | struct xlog_in_core **iclog, | 477 | struct xlog_in_core **iclog, |
466 | uint flags) | 478 | uint flags) |
467 | { | 479 | { |
468 | struct log *log = mp->m_log; | 480 | struct xlog *log = mp->m_log; |
469 | xfs_lsn_t lsn = 0; | 481 | xfs_lsn_t lsn = 0; |
470 | 482 | ||
471 | if (XLOG_FORCED_SHUTDOWN(log) || | 483 | if (XLOG_FORCED_SHUTDOWN(log) || |
@@ -810,6 +822,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
810 | void | 822 | void |
811 | xfs_log_unmount(xfs_mount_t *mp) | 823 | xfs_log_unmount(xfs_mount_t *mp) |
812 | { | 824 | { |
825 | cancel_delayed_work_sync(&mp->m_sync_work); | ||
813 | xfs_trans_ail_destroy(mp); | 826 | xfs_trans_ail_destroy(mp); |
814 | xlog_dealloc_log(mp->m_log); | 827 | xlog_dealloc_log(mp->m_log); |
815 | } | 828 | } |
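Note: the one functional change in this hunk is the ordering it enforces. The periodic sync worker is flushed and cancelled before the AIL and the log it operates on are freed; in outline (same code as above, with comments added for emphasis):

	void
	xfs_log_unmount(xfs_mount_t *mp)
	{
		/* 1. stop the periodic worker so it can no longer touch the log */
		cancel_delayed_work_sync(&mp->m_sync_work);
		/* 2. only then tear down the AIL ... */
		xfs_trans_ail_destroy(mp);
		/* 3. ... and finally free the log itself */
		xlog_dealloc_log(mp->m_log);
	}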
@@ -838,7 +851,7 @@ void | |||
838 | xfs_log_space_wake( | 851 | xfs_log_space_wake( |
839 | struct xfs_mount *mp) | 852 | struct xfs_mount *mp) |
840 | { | 853 | { |
841 | struct log *log = mp->m_log; | 854 | struct xlog *log = mp->m_log; |
842 | int free_bytes; | 855 | int free_bytes; |
843 | 856 | ||
844 | if (XLOG_FORCED_SHUTDOWN(log)) | 857 | if (XLOG_FORCED_SHUTDOWN(log)) |
@@ -916,7 +929,7 @@ xfs_lsn_t | |||
916 | xlog_assign_tail_lsn_locked( | 929 | xlog_assign_tail_lsn_locked( |
917 | struct xfs_mount *mp) | 930 | struct xfs_mount *mp) |
918 | { | 931 | { |
919 | struct log *log = mp->m_log; | 932 | struct xlog *log = mp->m_log; |
920 | struct xfs_log_item *lip; | 933 | struct xfs_log_item *lip; |
921 | xfs_lsn_t tail_lsn; | 934 | xfs_lsn_t tail_lsn; |
922 | 935 | ||
@@ -965,7 +978,7 @@ xlog_assign_tail_lsn( | |||
965 | */ | 978 | */ |
966 | STATIC int | 979 | STATIC int |
967 | xlog_space_left( | 980 | xlog_space_left( |
968 | struct log *log, | 981 | struct xlog *log, |
969 | atomic64_t *head) | 982 | atomic64_t *head) |
970 | { | 983 | { |
971 | int free_bytes; | 984 | int free_bytes; |
@@ -1277,7 +1290,7 @@ out: | |||
1277 | */ | 1290 | */ |
1278 | STATIC int | 1291 | STATIC int |
1279 | xlog_commit_record( | 1292 | xlog_commit_record( |
1280 | struct log *log, | 1293 | struct xlog *log, |
1281 | struct xlog_ticket *ticket, | 1294 | struct xlog_ticket *ticket, |
1282 | struct xlog_in_core **iclog, | 1295 | struct xlog_in_core **iclog, |
1283 | xfs_lsn_t *commitlsnp) | 1296 | xfs_lsn_t *commitlsnp) |
@@ -1311,7 +1324,7 @@ xlog_commit_record( | |||
1311 | */ | 1324 | */ |
1312 | STATIC void | 1325 | STATIC void |
1313 | xlog_grant_push_ail( | 1326 | xlog_grant_push_ail( |
1314 | struct log *log, | 1327 | struct xlog *log, |
1315 | int need_bytes) | 1328 | int need_bytes) |
1316 | { | 1329 | { |
1317 | xfs_lsn_t threshold_lsn = 0; | 1330 | xfs_lsn_t threshold_lsn = 0; |
@@ -1790,7 +1803,7 @@ xlog_write_start_rec( | |||
1790 | 1803 | ||
1791 | static xlog_op_header_t * | 1804 | static xlog_op_header_t * |
1792 | xlog_write_setup_ophdr( | 1805 | xlog_write_setup_ophdr( |
1793 | struct log *log, | 1806 | struct xlog *log, |
1794 | struct xlog_op_header *ophdr, | 1807 | struct xlog_op_header *ophdr, |
1795 | struct xlog_ticket *ticket, | 1808 | struct xlog_ticket *ticket, |
1796 | uint flags) | 1809 | uint flags) |
@@ -1873,7 +1886,7 @@ xlog_write_setup_copy( | |||
1873 | 1886 | ||
1874 | static int | 1887 | static int |
1875 | xlog_write_copy_finish( | 1888 | xlog_write_copy_finish( |
1876 | struct log *log, | 1889 | struct xlog *log, |
1877 | struct xlog_in_core *iclog, | 1890 | struct xlog_in_core *iclog, |
1878 | uint flags, | 1891 | uint flags, |
1879 | int *record_cnt, | 1892 | int *record_cnt, |
@@ -1958,7 +1971,7 @@ xlog_write_copy_finish( | |||
1958 | */ | 1971 | */ |
1959 | int | 1972 | int |
1960 | xlog_write( | 1973 | xlog_write( |
1961 | struct log *log, | 1974 | struct xlog *log, |
1962 | struct xfs_log_vec *log_vector, | 1975 | struct xfs_log_vec *log_vector, |
1963 | struct xlog_ticket *ticket, | 1976 | struct xlog_ticket *ticket, |
1964 | xfs_lsn_t *start_lsn, | 1977 | xfs_lsn_t *start_lsn, |
@@ -2821,7 +2834,7 @@ _xfs_log_force( | |||
2821 | uint flags, | 2834 | uint flags, |
2822 | int *log_flushed) | 2835 | int *log_flushed) |
2823 | { | 2836 | { |
2824 | struct log *log = mp->m_log; | 2837 | struct xlog *log = mp->m_log; |
2825 | struct xlog_in_core *iclog; | 2838 | struct xlog_in_core *iclog; |
2826 | xfs_lsn_t lsn; | 2839 | xfs_lsn_t lsn; |
2827 | 2840 | ||
@@ -2969,7 +2982,7 @@ _xfs_log_force_lsn( | |||
2969 | uint flags, | 2982 | uint flags, |
2970 | int *log_flushed) | 2983 | int *log_flushed) |
2971 | { | 2984 | { |
2972 | struct log *log = mp->m_log; | 2985 | struct xlog *log = mp->m_log; |
2973 | struct xlog_in_core *iclog; | 2986 | struct xlog_in_core *iclog; |
2974 | int already_slept = 0; | 2987 | int already_slept = 0; |
2975 | 2988 | ||
@@ -3147,7 +3160,7 @@ xfs_log_ticket_get( | |||
3147 | */ | 3160 | */ |
3148 | xlog_ticket_t * | 3161 | xlog_ticket_t * |
3149 | xlog_ticket_alloc( | 3162 | xlog_ticket_alloc( |
3150 | struct log *log, | 3163 | struct xlog *log, |
3151 | int unit_bytes, | 3164 | int unit_bytes, |
3152 | int cnt, | 3165 | int cnt, |
3153 | char client, | 3166 | char client, |
@@ -3278,7 +3291,7 @@ xlog_ticket_alloc( | |||
3278 | */ | 3291 | */ |
3279 | void | 3292 | void |
3280 | xlog_verify_dest_ptr( | 3293 | xlog_verify_dest_ptr( |
3281 | struct log *log, | 3294 | struct xlog *log, |
3282 | char *ptr) | 3295 | char *ptr) |
3283 | { | 3296 | { |
3284 | int i; | 3297 | int i; |
@@ -3307,7 +3320,7 @@ xlog_verify_dest_ptr( | |||
3307 | */ | 3320 | */ |
3308 | STATIC void | 3321 | STATIC void |
3309 | xlog_verify_grant_tail( | 3322 | xlog_verify_grant_tail( |
3310 | struct log *log) | 3323 | struct xlog *log) |
3311 | { | 3324 | { |
3312 | int tail_cycle, tail_blocks; | 3325 | int tail_cycle, tail_blocks; |
3313 | int cycle, space; | 3326 | int cycle, space; |
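Note: every xfs_log.c hunk above is a pure type rename (struct log becomes struct xlog); callers outside the log code still go through the struct xfs_mount wrappers and never see the log structure directly. A small usage sketch, assuming the 2012-era wrapper prototypes:

	/* Force all pending log buffers to disk and wait for the I/O. */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* Force the log only up to a specific LSN, without blocking. */
	xfs_log_force_lsn(mp, lsn, 0);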
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 7d6197c58493..ddc4529d07d3 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -44,7 +44,7 @@ | |||
44 | */ | 44 | */ |
45 | static struct xlog_ticket * | 45 | static struct xlog_ticket * |
46 | xlog_cil_ticket_alloc( | 46 | xlog_cil_ticket_alloc( |
47 | struct log *log) | 47 | struct xlog *log) |
48 | { | 48 | { |
49 | struct xlog_ticket *tic; | 49 | struct xlog_ticket *tic; |
50 | 50 | ||
@@ -72,7 +72,7 @@ xlog_cil_ticket_alloc( | |||
72 | */ | 72 | */ |
73 | void | 73 | void |
74 | xlog_cil_init_post_recovery( | 74 | xlog_cil_init_post_recovery( |
75 | struct log *log) | 75 | struct xlog *log) |
76 | { | 76 | { |
77 | log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); | 77 | log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); |
78 | log->l_cilp->xc_ctx->sequence = 1; | 78 | log->l_cilp->xc_ctx->sequence = 1; |
@@ -182,7 +182,7 @@ xlog_cil_prepare_log_vecs( | |||
182 | */ | 182 | */ |
183 | STATIC void | 183 | STATIC void |
184 | xfs_cil_prepare_item( | 184 | xfs_cil_prepare_item( |
185 | struct log *log, | 185 | struct xlog *log, |
186 | struct xfs_log_vec *lv, | 186 | struct xfs_log_vec *lv, |
187 | int *len, | 187 | int *len, |
188 | int *diff_iovecs) | 188 | int *diff_iovecs) |
@@ -231,7 +231,7 @@ xfs_cil_prepare_item( | |||
231 | */ | 231 | */ |
232 | static void | 232 | static void |
233 | xlog_cil_insert_items( | 233 | xlog_cil_insert_items( |
234 | struct log *log, | 234 | struct xlog *log, |
235 | struct xfs_log_vec *log_vector, | 235 | struct xfs_log_vec *log_vector, |
236 | struct xlog_ticket *ticket) | 236 | struct xlog_ticket *ticket) |
237 | { | 237 | { |
@@ -373,7 +373,7 @@ xlog_cil_committed( | |||
373 | */ | 373 | */ |
374 | STATIC int | 374 | STATIC int |
375 | xlog_cil_push( | 375 | xlog_cil_push( |
376 | struct log *log) | 376 | struct xlog *log) |
377 | { | 377 | { |
378 | struct xfs_cil *cil = log->l_cilp; | 378 | struct xfs_cil *cil = log->l_cilp; |
379 | struct xfs_log_vec *lv; | 379 | struct xfs_log_vec *lv; |
@@ -601,7 +601,7 @@ xlog_cil_push_work( | |||
601 | */ | 601 | */ |
602 | static void | 602 | static void |
603 | xlog_cil_push_background( | 603 | xlog_cil_push_background( |
604 | struct log *log) | 604 | struct xlog *log) |
605 | { | 605 | { |
606 | struct xfs_cil *cil = log->l_cilp; | 606 | struct xfs_cil *cil = log->l_cilp; |
607 | 607 | ||
@@ -629,7 +629,7 @@ xlog_cil_push_background( | |||
629 | 629 | ||
630 | static void | 630 | static void |
631 | xlog_cil_push_foreground( | 631 | xlog_cil_push_foreground( |
632 | struct log *log, | 632 | struct xlog *log, |
633 | xfs_lsn_t push_seq) | 633 | xfs_lsn_t push_seq) |
634 | { | 634 | { |
635 | struct xfs_cil *cil = log->l_cilp; | 635 | struct xfs_cil *cil = log->l_cilp; |
@@ -683,7 +683,7 @@ xfs_log_commit_cil( | |||
683 | xfs_lsn_t *commit_lsn, | 683 | xfs_lsn_t *commit_lsn, |
684 | int flags) | 684 | int flags) |
685 | { | 685 | { |
686 | struct log *log = mp->m_log; | 686 | struct xlog *log = mp->m_log; |
687 | int log_flags = 0; | 687 | int log_flags = 0; |
688 | struct xfs_log_vec *log_vector; | 688 | struct xfs_log_vec *log_vector; |
689 | 689 | ||
@@ -754,7 +754,7 @@ xfs_log_commit_cil( | |||
754 | */ | 754 | */ |
755 | xfs_lsn_t | 755 | xfs_lsn_t |
756 | xlog_cil_force_lsn( | 756 | xlog_cil_force_lsn( |
757 | struct log *log, | 757 | struct xlog *log, |
758 | xfs_lsn_t sequence) | 758 | xfs_lsn_t sequence) |
759 | { | 759 | { |
760 | struct xfs_cil *cil = log->l_cilp; | 760 | struct xfs_cil *cil = log->l_cilp; |
@@ -833,7 +833,7 @@ xfs_log_item_in_current_chkpt( | |||
833 | */ | 833 | */ |
834 | int | 834 | int |
835 | xlog_cil_init( | 835 | xlog_cil_init( |
836 | struct log *log) | 836 | struct xlog *log) |
837 | { | 837 | { |
838 | struct xfs_cil *cil; | 838 | struct xfs_cil *cil; |
839 | struct xfs_cil_ctx *ctx; | 839 | struct xfs_cil_ctx *ctx; |
@@ -869,7 +869,7 @@ xlog_cil_init( | |||
869 | 869 | ||
870 | void | 870 | void |
871 | xlog_cil_destroy( | 871 | xlog_cil_destroy( |
872 | struct log *log) | 872 | struct xlog *log) |
873 | { | 873 | { |
874 | if (log->l_cilp->xc_ctx) { | 874 | if (log->l_cilp->xc_ctx) { |
875 | if (log->l_cilp->xc_ctx->ticket) | 875 | if (log->l_cilp->xc_ctx->ticket) |
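Note: the CIL hunks show two push entry points being retyped. The background variant is the one called at transaction commit and only kicks a push once the CIL has accumulated enough changes; the foreground variant is used by xlog_cil_force_lsn() to push a specific sequence. A heavily simplified sketch of the background check, assuming the 3.5-era field names (xc_ctx->space_used, xc_push_work, m_cil_workqueue) and the real XLOG_CIL_SPACE_LIMIT macro; the body is illustrative, not the kernel function:

	static void
	example_cil_push_background(
		struct xlog	*log)
	{
		struct xfs_cil	*cil = log->l_cilp;

		/* not enough changes yet to make a background checkpoint worthwhile */
		if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
			return;

		/* field names assumed: hand the push off to the CIL workqueue */
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}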
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 5bc33261f5be..72eba2201b14 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #define __XFS_LOG_PRIV_H__ | 19 | #define __XFS_LOG_PRIV_H__ |
20 | 20 | ||
21 | struct xfs_buf; | 21 | struct xfs_buf; |
22 | struct log; | 22 | struct xlog; |
23 | struct xlog_ticket; | 23 | struct xlog_ticket; |
24 | struct xfs_mount; | 24 | struct xfs_mount; |
25 | 25 | ||
@@ -352,7 +352,7 @@ typedef struct xlog_in_core { | |||
352 | struct xlog_in_core *ic_next; | 352 | struct xlog_in_core *ic_next; |
353 | struct xlog_in_core *ic_prev; | 353 | struct xlog_in_core *ic_prev; |
354 | struct xfs_buf *ic_bp; | 354 | struct xfs_buf *ic_bp; |
355 | struct log *ic_log; | 355 | struct xlog *ic_log; |
356 | int ic_size; | 356 | int ic_size; |
357 | int ic_offset; | 357 | int ic_offset; |
358 | int ic_bwritecnt; | 358 | int ic_bwritecnt; |
@@ -409,7 +409,7 @@ struct xfs_cil_ctx { | |||
409 | * operations almost as efficient as the old logging methods. | 409 | * operations almost as efficient as the old logging methods. |
410 | */ | 410 | */ |
411 | struct xfs_cil { | 411 | struct xfs_cil { |
412 | struct log *xc_log; | 412 | struct xlog *xc_log; |
413 | struct list_head xc_cil; | 413 | struct list_head xc_cil; |
414 | spinlock_t xc_cil_lock; | 414 | spinlock_t xc_cil_lock; |
415 | struct xfs_cil_ctx *xc_ctx; | 415 | struct xfs_cil_ctx *xc_ctx; |
@@ -487,7 +487,7 @@ struct xlog_grant_head { | |||
487 | * overflow 31 bits worth of byte offset, so using a byte number will mean | 487 | * overflow 31 bits worth of byte offset, so using a byte number will mean |
488 | * that round off problems won't occur when releasing partial reservations. | 488 | * that round off problems won't occur when releasing partial reservations. |
489 | */ | 489 | */ |
490 | typedef struct log { | 490 | typedef struct xlog { |
491 | /* The following fields don't need locking */ | 491 | /* The following fields don't need locking */ |
492 | struct xfs_mount *l_mp; /* mount point */ | 492 | struct xfs_mount *l_mp; /* mount point */ |
493 | struct xfs_ail *l_ailp; /* AIL log is working with */ | 493 | struct xfs_ail *l_ailp; /* AIL log is working with */ |
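Note: the comment leading into this hunk explains why the grant heads are tracked as a 64-bit cycle/byte pair. Concretely, the xlog_assign_grant_head()/xlog_crack_grant_head() helpers later in this header pack the cycle into the high 32 bits and the byte offset into the low 32 bits, so a partial reservation release never loses precision to basic-block rounding. With made-up numbers:

	/* cycle 5, byte offset 8192, as one packed grant head value */
	int64_t	head  = ((int64_t)5 << 32) | 8192;	/* what xlog_assign_grant_head() stores */
	int	cycle = head >> 32;			/* 5    */
	int	space = head & 0xffffffff;		/* 8192 */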
@@ -553,9 +553,14 @@ extern int xlog_recover_finish(xlog_t *log); | |||
553 | extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); | 553 | extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); |
554 | 554 | ||
555 | extern kmem_zone_t *xfs_log_ticket_zone; | 555 | extern kmem_zone_t *xfs_log_ticket_zone; |
556 | struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes, | 556 | struct xlog_ticket * |
557 | int count, char client, bool permanent, | 557 | xlog_ticket_alloc( |
558 | xfs_km_flags_t alloc_flags); | 558 | struct xlog *log, |
559 | int unit_bytes, | ||
560 | int count, | ||
561 | char client, | ||
562 | bool permanent, | ||
563 | xfs_km_flags_t alloc_flags); | ||
559 | 564 | ||
560 | 565 | ||
561 | static inline void | 566 | static inline void |
@@ -567,9 +572,14 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes) | |||
567 | } | 572 | } |
568 | 573 | ||
569 | void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); | 574 | void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); |
570 | int xlog_write(struct log *log, struct xfs_log_vec *log_vector, | 575 | int |
571 | struct xlog_ticket *tic, xfs_lsn_t *start_lsn, | 576 | xlog_write( |
572 | xlog_in_core_t **commit_iclog, uint flags); | 577 | struct xlog *log, |
578 | struct xfs_log_vec *log_vector, | ||
579 | struct xlog_ticket *tic, | ||
580 | xfs_lsn_t *start_lsn, | ||
581 | struct xlog_in_core **commit_iclog, | ||
582 | uint flags); | ||
573 | 583 | ||
574 | /* | 584 | /* |
575 | * When we crack an atomic LSN, we sample it first so that the value will not | 585 | * When we crack an atomic LSN, we sample it first so that the value will not |
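Note: the truncated comment at the end of this hunk describes the same sample-then-split discipline applied to LSNs: read the atomic64_t exactly once, then crack the sampled value, so the cycle and block halves always come from one consistent 64-bit value. A sketch of that pattern; CYCLE_LSN/BLOCK_LSN are the standard XFS macros, the function name here is illustrative:

	static inline void
	example_crack_atomic_lsn(
		atomic64_t	*lsn,
		uint		*cycle,
		uint		*block)
	{
		xfs_lsn_t	val = atomic64_read(lsn);	/* single sample ...        */

		*cycle = CYCLE_LSN(val);	/* ... so both halves are taken from */
		*block = BLOCK_LSN(val);	/* the same 64-bit value             */
	}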
@@ -629,17 +639,23 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space) | |||
629 | /* | 639 | /* |
630 | * Committed Item List interfaces | 640 | * Committed Item List interfaces |
631 | */ | 641 | */ |
632 | int xlog_cil_init(struct log *log); | 642 | int |
633 | void xlog_cil_init_post_recovery(struct log *log); | 643 | xlog_cil_init(struct xlog *log); |
634 | void xlog_cil_destroy(struct log *log); | 644 | void |
645 | xlog_cil_init_post_recovery(struct xlog *log); | ||
646 | void | ||
647 | xlog_cil_destroy(struct xlog *log); | ||
635 | 648 | ||
636 | /* | 649 | /* |
637 | * CIL force routines | 650 | * CIL force routines |
638 | */ | 651 | */ |
639 | xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence); | 652 | xfs_lsn_t |
653 | xlog_cil_force_lsn( | ||
654 | struct xlog *log, | ||
655 | xfs_lsn_t sequence); | ||
640 | 656 | ||
641 | static inline void | 657 | static inline void |
642 | xlog_cil_force(struct log *log) | 658 | xlog_cil_force(struct xlog *log) |
643 | { | 659 | { |
644 | xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence); | 660 | xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence); |
645 | } | 661 | } |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index ca386909131a..a7be98abd6a9 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -1471,8 +1471,8 @@ xlog_recover_add_item( | |||
1471 | 1471 | ||
1472 | STATIC int | 1472 | STATIC int |
1473 | xlog_recover_add_to_cont_trans( | 1473 | xlog_recover_add_to_cont_trans( |
1474 | struct log *log, | 1474 | struct xlog *log, |
1475 | xlog_recover_t *trans, | 1475 | struct xlog_recover *trans, |
1476 | xfs_caddr_t dp, | 1476 | xfs_caddr_t dp, |
1477 | int len) | 1477 | int len) |
1478 | { | 1478 | { |
@@ -1517,8 +1517,8 @@ xlog_recover_add_to_cont_trans( | |||
1517 | */ | 1517 | */ |
1518 | STATIC int | 1518 | STATIC int |
1519 | xlog_recover_add_to_trans( | 1519 | xlog_recover_add_to_trans( |
1520 | struct log *log, | 1520 | struct xlog *log, |
1521 | xlog_recover_t *trans, | 1521 | struct xlog_recover *trans, |
1522 | xfs_caddr_t dp, | 1522 | xfs_caddr_t dp, |
1523 | int len) | 1523 | int len) |
1524 | { | 1524 | { |
@@ -1588,8 +1588,8 @@ xlog_recover_add_to_trans( | |||
1588 | */ | 1588 | */ |
1589 | STATIC int | 1589 | STATIC int |
1590 | xlog_recover_reorder_trans( | 1590 | xlog_recover_reorder_trans( |
1591 | struct log *log, | 1591 | struct xlog *log, |
1592 | xlog_recover_t *trans, | 1592 | struct xlog_recover *trans, |
1593 | int pass) | 1593 | int pass) |
1594 | { | 1594 | { |
1595 | xlog_recover_item_t *item, *n; | 1595 | xlog_recover_item_t *item, *n; |
@@ -1642,8 +1642,8 @@ xlog_recover_reorder_trans( | |||
1642 | */ | 1642 | */ |
1643 | STATIC int | 1643 | STATIC int |
1644 | xlog_recover_buffer_pass1( | 1644 | xlog_recover_buffer_pass1( |
1645 | struct log *log, | 1645 | struct xlog *log, |
1646 | xlog_recover_item_t *item) | 1646 | struct xlog_recover_item *item) |
1647 | { | 1647 | { |
1648 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; | 1648 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; |
1649 | struct list_head *bucket; | 1649 | struct list_head *bucket; |
@@ -1696,7 +1696,7 @@ xlog_recover_buffer_pass1( | |||
1696 | */ | 1696 | */ |
1697 | STATIC int | 1697 | STATIC int |
1698 | xlog_check_buffer_cancelled( | 1698 | xlog_check_buffer_cancelled( |
1699 | struct log *log, | 1699 | struct xlog *log, |
1700 | xfs_daddr_t blkno, | 1700 | xfs_daddr_t blkno, |
1701 | uint len, | 1701 | uint len, |
1702 | ushort flags) | 1702 | ushort flags) |
@@ -2689,9 +2689,9 @@ xlog_recover_free_trans( | |||
2689 | 2689 | ||
2690 | STATIC int | 2690 | STATIC int |
2691 | xlog_recover_commit_pass1( | 2691 | xlog_recover_commit_pass1( |
2692 | struct log *log, | 2692 | struct xlog *log, |
2693 | struct xlog_recover *trans, | 2693 | struct xlog_recover *trans, |
2694 | xlog_recover_item_t *item) | 2694 | struct xlog_recover_item *item) |
2695 | { | 2695 | { |
2696 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); | 2696 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); |
2697 | 2697 | ||
@@ -2716,10 +2716,10 @@ xlog_recover_commit_pass1( | |||
2716 | 2716 | ||
2717 | STATIC int | 2717 | STATIC int |
2718 | xlog_recover_commit_pass2( | 2718 | xlog_recover_commit_pass2( |
2719 | struct log *log, | 2719 | struct xlog *log, |
2720 | struct xlog_recover *trans, | 2720 | struct xlog_recover *trans, |
2721 | struct list_head *buffer_list, | 2721 | struct list_head *buffer_list, |
2722 | xlog_recover_item_t *item) | 2722 | struct xlog_recover_item *item) |
2723 | { | 2723 | { |
2724 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); | 2724 | trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); |
2725 | 2725 | ||
@@ -2753,7 +2753,7 @@ xlog_recover_commit_pass2( | |||
2753 | */ | 2753 | */ |
2754 | STATIC int | 2754 | STATIC int |
2755 | xlog_recover_commit_trans( | 2755 | xlog_recover_commit_trans( |
2756 | struct log *log, | 2756 | struct xlog *log, |
2757 | struct xlog_recover *trans, | 2757 | struct xlog_recover *trans, |
2758 | int pass) | 2758 | int pass) |
2759 | { | 2759 | { |
@@ -2793,8 +2793,8 @@ out: | |||
2793 | 2793 | ||
2794 | STATIC int | 2794 | STATIC int |
2795 | xlog_recover_unmount_trans( | 2795 | xlog_recover_unmount_trans( |
2796 | struct log *log, | 2796 | struct xlog *log, |
2797 | xlog_recover_t *trans) | 2797 | struct xlog_recover *trans) |
2798 | { | 2798 | { |
2799 | /* Do nothing now */ | 2799 | /* Do nothing now */ |
2800 | xfs_warn(log->l_mp, "%s: Unmount LR", __func__); | 2800 | xfs_warn(log->l_mp, "%s: Unmount LR", __func__); |
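Note: the recovery routines retyped above are all driven from xlog_recover_commit_trans(), whose signature also changes in this file. A simplified sketch of the two-pass dispatch it performs (list/field names r_itemq and ri_list are assumed from the recovery structures; error handling and flushing of the delayed-write buffer list are omitted):

	/* Simplified: pass 1 records cancelled buffers, pass 2 replays the items. */
	struct xlog_recover_item	*item;
	LIST_HEAD(buffer_list);
	int				error = 0;

	list_for_each_entry(item, &trans->r_itemq, ri_list) {
		switch (pass) {
		case XLOG_RECOVER_PASS1:
			error = xlog_recover_commit_pass1(log, trans, item);
			break;
		case XLOG_RECOVER_PASS2:
			error = xlog_recover_commit_pass2(log, trans,
							  &buffer_list, item);
			break;
		}
		if (error)
			break;
	}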
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 8b89c5ac72d9..90c1fc9eaea4 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -53,7 +53,7 @@ typedef struct xfs_trans_reservations { | |||
53 | 53 | ||
54 | #include "xfs_sync.h" | 54 | #include "xfs_sync.h" |
55 | 55 | ||
56 | struct log; | 56 | struct xlog; |
57 | struct xfs_mount_args; | 57 | struct xfs_mount_args; |
58 | struct xfs_inode; | 58 | struct xfs_inode; |
59 | struct xfs_bmbt_irec; | 59 | struct xfs_bmbt_irec; |
@@ -133,7 +133,7 @@ typedef struct xfs_mount { | |||
133 | uint m_readio_blocks; /* min read size blocks */ | 133 | uint m_readio_blocks; /* min read size blocks */ |
134 | uint m_writeio_log; /* min write size log bytes */ | 134 | uint m_writeio_log; /* min write size log bytes */ |
135 | uint m_writeio_blocks; /* min write size blocks */ | 135 | uint m_writeio_blocks; /* min write size blocks */ |
136 | struct log *m_log; /* log specific stuff */ | 136 | struct xlog *m_log; /* log specific stuff */ |
137 | int m_logbufs; /* number of log buffers */ | 137 | int m_logbufs; /* number of log buffers */ |
138 | int m_logbsize; /* size of each log buffer */ | 138 | int m_logbsize; /* size of each log buffer */ |
139 | uint m_rsumlevels; /* rt summary levels */ | 139 | uint m_rsumlevels; /* rt summary levels */ |
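Note: xfs_mount.h only ever holds an opaque pointer to the log, which is why a bare forward declaration suffices here and why the old generic name "struct log" was worth renaming to something that cannot collide elsewhere in the kernel. The same idiom in miniature (a sketch, not kernel code):

	struct xlog;				/* opaque: layout lives in xfs_log_priv.h */

	struct example_mount {
		struct xlog	*m_log;		/* callers may pass this around ...       */
	};					/* ... but cannot dereference it here     */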
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c index c9d3409c5ca3..1e9ee064dbb2 100644 --- a/fs/xfs/xfs_sync.c +++ b/fs/xfs/xfs_sync.c | |||
@@ -386,23 +386,23 @@ xfs_sync_worker( | |||
386 | * We shouldn't write/force the log if we are in the mount/unmount | 386 | * We shouldn't write/force the log if we are in the mount/unmount |
387 | * process or on a read only filesystem. The workqueue still needs to be | 387 | * process or on a read only filesystem. The workqueue still needs to be |
388 | * active in both cases, however, because it is used for inode reclaim | 388 | * active in both cases, however, because it is used for inode reclaim |
389 | * during these times. Use the s_umount semaphore to provide exclusion | 389 | * during these times. Use the MS_ACTIVE flag to avoid doing anything |
390 | * with unmount. | 390 | * during mount. Doing work during unmount is avoided by calling |
391 | * cancel_delayed_work_sync on this work queue before tearing down | ||
392 | * the ail and the log in xfs_log_unmount. | ||
391 | */ | 393 | */ |
392 | if (down_read_trylock(&mp->m_super->s_umount)) { | 394 | if (!(mp->m_super->s_flags & MS_ACTIVE) && |
393 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { | 395 | !(mp->m_flags & XFS_MOUNT_RDONLY)) { |
394 | /* dgc: errors ignored here */ | 396 | /* dgc: errors ignored here */ |
395 | if (mp->m_super->s_frozen == SB_UNFROZEN && | 397 | if (mp->m_super->s_frozen == SB_UNFROZEN && |
396 | xfs_log_need_covered(mp)) | 398 | xfs_log_need_covered(mp)) |
397 | error = xfs_fs_log_dummy(mp); | 399 | error = xfs_fs_log_dummy(mp); |
398 | else | 400 | else |
399 | xfs_log_force(mp, 0); | 401 | xfs_log_force(mp, 0); |
400 | 402 | ||
401 | /* start pushing all the metadata that is currently | 403 | /* start pushing all the metadata that is currently |
402 | * dirty */ | 404 | * dirty */ |
403 | xfs_ail_push_all(mp->m_ail); | 405 | xfs_ail_push_all(mp->m_ail); |
404 | } | ||
405 | up_read(&mp->m_super->s_umount); | ||
406 | } | 406 | } |
407 | 407 | ||
408 | /* queue us up again */ | 408 | /* queue us up again */ |
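Note: together with the cancel_delayed_work_sync() hunk in xfs_log.c above, the new comment describes the full lifecycle of m_sync_work: it only does real work once the superblock is marked active, and it is guaranteed to be idle before the AIL and log are torn down. In outline, with the mount-side call sites paraphrased from the same era (the xfs_syncd_wq workqueue and xfs_syncd_centisecs tunable are assumptions, not part of this diff):

	/* mount time (assumed call site): arm the periodic worker */
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
			   msecs_to_jiffies(xfs_syncd_centisecs * 10));

	/* unmount time, from the xfs_log.c hunk above: wait for it to finish */
	cancel_delayed_work_sync(&mp->m_sync_work);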
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 7cf9d3529e51..caf5dabfd553 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
@@ -32,7 +32,7 @@ struct xfs_da_node_entry; | |||
32 | struct xfs_dquot; | 32 | struct xfs_dquot; |
33 | struct xfs_log_item; | 33 | struct xfs_log_item; |
34 | struct xlog_ticket; | 34 | struct xlog_ticket; |
35 | struct log; | 35 | struct xlog; |
36 | struct xlog_recover; | 36 | struct xlog_recover; |
37 | struct xlog_recover_item; | 37 | struct xlog_recover_item; |
38 | struct xfs_buf_log_format; | 38 | struct xfs_buf_log_format; |
@@ -762,7 +762,7 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_force); | |||
762 | DEFINE_DQUOT_EVENT(xfs_dqflush_done); | 762 | DEFINE_DQUOT_EVENT(xfs_dqflush_done); |
763 | 763 | ||
764 | DECLARE_EVENT_CLASS(xfs_loggrant_class, | 764 | DECLARE_EVENT_CLASS(xfs_loggrant_class, |
765 | TP_PROTO(struct log *log, struct xlog_ticket *tic), | 765 | TP_PROTO(struct xlog *log, struct xlog_ticket *tic), |
766 | TP_ARGS(log, tic), | 766 | TP_ARGS(log, tic), |
767 | TP_STRUCT__entry( | 767 | TP_STRUCT__entry( |
768 | __field(dev_t, dev) | 768 | __field(dev_t, dev) |
@@ -830,7 +830,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, | |||
830 | 830 | ||
831 | #define DEFINE_LOGGRANT_EVENT(name) \ | 831 | #define DEFINE_LOGGRANT_EVENT(name) \ |
832 | DEFINE_EVENT(xfs_loggrant_class, name, \ | 832 | DEFINE_EVENT(xfs_loggrant_class, name, \ |
833 | TP_PROTO(struct log *log, struct xlog_ticket *tic), \ | 833 | TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \ |
834 | TP_ARGS(log, tic)) | 834 | TP_ARGS(log, tic)) |
835 | DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); | 835 | DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); |
836 | DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); | 836 | DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); |
@@ -1664,7 +1664,7 @@ DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before); | |||
1664 | DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); | 1664 | DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); |
1665 | 1665 | ||
1666 | DECLARE_EVENT_CLASS(xfs_log_recover_item_class, | 1666 | DECLARE_EVENT_CLASS(xfs_log_recover_item_class, |
1667 | TP_PROTO(struct log *log, struct xlog_recover *trans, | 1667 | TP_PROTO(struct xlog *log, struct xlog_recover *trans, |
1668 | struct xlog_recover_item *item, int pass), | 1668 | struct xlog_recover_item *item, int pass), |
1669 | TP_ARGS(log, trans, item, pass), | 1669 | TP_ARGS(log, trans, item, pass), |
1670 | TP_STRUCT__entry( | 1670 | TP_STRUCT__entry( |
@@ -1698,7 +1698,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class, | |||
1698 | 1698 | ||
1699 | #define DEFINE_LOG_RECOVER_ITEM(name) \ | 1699 | #define DEFINE_LOG_RECOVER_ITEM(name) \ |
1700 | DEFINE_EVENT(xfs_log_recover_item_class, name, \ | 1700 | DEFINE_EVENT(xfs_log_recover_item_class, name, \ |
1701 | TP_PROTO(struct log *log, struct xlog_recover *trans, \ | 1701 | TP_PROTO(struct xlog *log, struct xlog_recover *trans, \ |
1702 | struct xlog_recover_item *item, int pass), \ | 1702 | struct xlog_recover_item *item, int pass), \ |
1703 | TP_ARGS(log, trans, item, pass)) | 1703 | TP_ARGS(log, trans, item, pass)) |
1704 | 1704 | ||
@@ -1709,7 +1709,7 @@ DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail); | |||
1709 | DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); | 1709 | DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); |
1710 | 1710 | ||
1711 | DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, | 1711 | DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, |
1712 | TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), | 1712 | TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), |
1713 | TP_ARGS(log, buf_f), | 1713 | TP_ARGS(log, buf_f), |
1714 | TP_STRUCT__entry( | 1714 | TP_STRUCT__entry( |
1715 | __field(dev_t, dev) | 1715 | __field(dev_t, dev) |
@@ -1739,7 +1739,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, | |||
1739 | 1739 | ||
1740 | #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ | 1740 | #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ |
1741 | DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ | 1741 | DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ |
1742 | TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \ | 1742 | TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \ |
1743 | TP_ARGS(log, buf_f)) | 1743 | TP_ARGS(log, buf_f)) |
1744 | 1744 | ||
1745 | DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); | 1745 | DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); |
@@ -1752,7 +1752,7 @@ DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf); | |||
1752 | DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); | 1752 | DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); |
1753 | 1753 | ||
1754 | DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, | 1754 | DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, |
1755 | TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), | 1755 | TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), |
1756 | TP_ARGS(log, in_f), | 1756 | TP_ARGS(log, in_f), |
1757 | TP_STRUCT__entry( | 1757 | TP_STRUCT__entry( |
1758 | __field(dev_t, dev) | 1758 | __field(dev_t, dev) |
@@ -1790,7 +1790,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, | |||
1790 | ) | 1790 | ) |
1791 | #define DEFINE_LOG_RECOVER_INO_ITEM(name) \ | 1791 | #define DEFINE_LOG_RECOVER_INO_ITEM(name) \ |
1792 | DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ | 1792 | DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ |
1793 | TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \ | 1793 | TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \ |
1794 | TP_ARGS(log, in_f)) | 1794 | TP_ARGS(log, in_f)) |
1795 | 1795 | ||
1796 | DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); | 1796 | DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); |