Diffstat (limited to 'fs')

 -rw-r--r--  fs/aio.c                  113
 -rw-r--r--  fs/ceph/addr.c              8
 -rw-r--r--  fs/ceph/inode.c           136
 -rw-r--r--  fs/pstore/platform.c        7
 -rw-r--r--  fs/sysfs/file.c             8
 -rw-r--r--  fs/xfs/xfs_bmap.c          32
 -rw-r--r--  fs/xfs/xfs_bmap_util.c     14
 -rw-r--r--  fs/xfs/xfs_buf.c           37
 -rw-r--r--  fs/xfs/xfs_buf.h           11
 -rw-r--r--  fs/xfs/xfs_buf_item.c      21
 -rw-r--r--  fs/xfs/xfs_dir2_node.c     26
 -rw-r--r--  fs/xfs/xfs_iops.c           3
 -rw-r--r--  fs/xfs/xfs_log_recover.c   13
 -rw-r--r--  fs/xfs/xfs_qm.c            80
 -rw-r--r--  fs/xfs/xfs_trans_buf.c     13

15 files changed, 308 insertions(+), 214 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
 	int i;
 
 	for (i = 0; i < ctx->nr_pages; i++) {
+		struct page *page;
 		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
 			 page_count(ctx->ring_pages[i]));
-		put_page(ctx->ring_pages[i]);
+		page = ctx->ring_pages[i];
+		if (!page)
+			continue;
+		ctx->ring_pages[i] = NULL;
+		put_page(page);
 	}
 
 	put_aio_ring_file(ctx);
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 	unsigned long flags;
 	int rc;
 
+	rc = 0;
+
+	/* Make sure the old page hasn't already been changed */
+	spin_lock(&mapping->private_lock);
+	ctx = mapping->private_data;
+	if (ctx) {
+		pgoff_t idx;
+		spin_lock_irqsave(&ctx->completion_lock, flags);
+		idx = old->index;
+		if (idx < (pgoff_t)ctx->nr_pages) {
+			if (ctx->ring_pages[idx] != old)
+				rc = -EAGAIN;
+		} else
+			rc = -EINVAL;
+		spin_unlock_irqrestore(&ctx->completion_lock, flags);
+	} else
+		rc = -EINVAL;
+	spin_unlock(&mapping->private_lock);
+
+	if (rc != 0)
+		return rc;
+
 	/* Writeback must be complete */
 	BUG_ON(PageWriteback(old));
-	put_page(old);
+	get_page(new);
 
-	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
+	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
 	if (rc != MIGRATEPAGE_SUCCESS) {
-		get_page(old);
+		put_page(new);
 		return rc;
 	}
 
-	get_page(new);
-
 	/* We can potentially race against kioctx teardown here.  Use the
 	 * address_space's private data lock to protect the mapping's
 	 * private_data.
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 		spin_lock_irqsave(&ctx->completion_lock, flags);
 		migrate_page_copy(new, old);
 		idx = old->index;
-		if (idx < (pgoff_t)ctx->nr_pages)
-			ctx->ring_pages[idx] = new;
+		if (idx < (pgoff_t)ctx->nr_pages) {
+			/* And only do the move if things haven't changed */
+			if (ctx->ring_pages[idx] == old)
+				ctx->ring_pages[idx] = new;
+			else
+				rc = -EAGAIN;
+		} else
+			rc = -EINVAL;
 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	} else
 		rc = -EBUSY;
 	spin_unlock(&mapping->private_lock);
 
+	if (rc == MIGRATEPAGE_SUCCESS)
+		put_page(old);
+	else
+		put_page(new);
+
 	return rc;
 }
 #endif
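The aio_migratepage() rework above is a check-then-commit pattern: the ring slot is validated under mapping->private_lock and ctx->completion_lock before any work is done, the reference on the new page is taken up front, the slot is re-checked before committing, and only the losing page's reference is dropped at the end. Returning -EAGAIN lets the migration core retry the whole operation. A minimal userspace sketch of the same shape, with a pthread mutex standing in for the kernel locks (names are illustrative, not kernel API):

    #include <errno.h>
    #include <pthread.h>

    struct ring {
            pthread_mutex_t lock;
            void *slot[64];
            int nr;
    };

    /* Replace 'old' with 'new' only if the slot still holds 'old'.
     * Returns 0 on success, -EAGAIN if the slot changed underneath us,
     * -EINVAL if the index is out of range. */
    static int replace_slot(struct ring *r, int idx, void *old, void *new)
    {
            int rc = 0;

            pthread_mutex_lock(&r->lock);
            if (idx >= r->nr)
                    rc = -EINVAL;
            else if (r->slot[idx] != old)
                    rc = -EAGAIN;   /* raced with teardown or another update */
            else
                    r->slot[idx] = new;
            pthread_mutex_unlock(&r->lock);
            return rc;
    }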
@@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 	struct aio_ring *ring;
 	unsigned nr_events = ctx->max_reqs;
 	struct mm_struct *mm = current->mm;
-	unsigned long size, populate;
+	unsigned long size, unused;
 	int nr_pages;
 	int i;
 	struct file *file;
@@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
 		return -EAGAIN;
 	}
 
+	ctx->aio_ring_file = file;
+	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
+			/ sizeof(struct io_event);
+
+	ctx->ring_pages = ctx->internal_pages;
+	if (nr_pages > AIO_RING_PAGES) {
+		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+					  GFP_KERNEL);
+		if (!ctx->ring_pages) {
+			put_aio_ring_file(ctx);
+			return -ENOMEM;
+		}
+	}
+
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
 		page = find_or_create_page(file->f_inode->i_mapping,
@@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
 		SetPageUptodate(page);
 		SetPageDirty(page);
 		unlock_page(page);
+
+		ctx->ring_pages[i] = page;
 	}
-	ctx->aio_ring_file = file;
-	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
-			/ sizeof(struct io_event);
+	ctx->nr_pages = i;
 
-	ctx->ring_pages = ctx->internal_pages;
-	if (nr_pages > AIO_RING_PAGES) {
-		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
-					  GFP_KERNEL);
-		if (!ctx->ring_pages) {
-			put_aio_ring_file(ctx);
-			return -ENOMEM;
-		}
+	if (unlikely(i != nr_pages)) {
+		aio_free_ring(ctx);
+		return -EAGAIN;
 	}
 
 	ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
 	down_write(&mm->mmap_sem);
 	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
 				       PROT_READ | PROT_WRITE,
-				       MAP_SHARED | MAP_POPULATE, 0, &populate);
+				       MAP_SHARED, 0, &unused);
+	up_write(&mm->mmap_sem);
 	if (IS_ERR((void *)ctx->mmap_base)) {
-		up_write(&mm->mmap_sem);
 		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
 		return -EAGAIN;
@@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
 
 	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
 
-	/* We must do this while still holding mmap_sem for write, as we
-	 * need to be protected against userspace attempting to mremap()
-	 * or munmap() the ring buffer.
-	 */
-	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
-				       1, 0, ctx->ring_pages, NULL);
-
-	/* Dropping the reference here is safe as the page cache will hold
-	 * onto the pages for us.  It is also required so that page migration
-	 * can unmap the pages and get the right reference count.
-	 */
-	for (i = 0; i < ctx->nr_pages; i++)
-		put_page(ctx->ring_pages[i]);
-
-	up_write(&mm->mmap_sem);
-
-	if (unlikely(ctx->nr_pages != nr_pages)) {
-		aio_free_ring(ctx);
-		return -EAGAIN;
-	}
-
 	ctx->user_id = ctx->mmap_base;
 	ctx->nr_events = nr_events;	/* trusted copy */
 
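aio_setup_ring() now records every page in ctx->ring_pages as it is created and keeps ctx->nr_pages current, so a partial failure can be unwound by the ordinary aio_free_ring() path instead of relying on get_user_pages() after the mmap. The underlying idiom, allocate-and-record-progress, in a plain C sketch (illustrative helper, not the kernel code):

    #include <stdlib.h>

    /* Allocate n buffers, recording progress in *nr so that cleanup only
     * touches what was actually allocated. */
    static int setup_ring(void **bufs, int n, int *nr)
    {
            int i;

            for (i = 0; i < n; i++) {
                    bufs[i] = malloc(4096);
                    if (!bufs[i])
                            break;
            }
            *nr = i;                /* valid even if we stopped early */

            if (i != n) {
                    while (i-- > 0) /* unwind the partial allocation */
                            free(bufs[i]);
                    *nr = 0;
                    return -1;
            }
            return 0;
    }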
@@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	aio_nr += ctx->max_reqs;
 	spin_unlock(&aio_nr_lock);
 
-	percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
+	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
+	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */
 
 	err = ioctx_add_table(ctx, mm);
 	if (err)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1e561c059539..ec3ba43b9faa 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 	if (err < 0) {
 		SetPageError(page);
 		goto out;
-	} else if (err < PAGE_CACHE_SIZE) {
+	} else {
+		if (err < PAGE_CACHE_SIZE) {
 		/* zero fill remainder of page */
 		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		} else {
+			flush_dcache_page(page);
+		}
 	}
 	SetPageUptodate(page);
 
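The readpage change above distinguishes two success cases: a short read, where the rest of the page must be zeroed before the page is marked uptodate, and a full read, where flush_dcache_page() makes the new contents visible to user mappings on aliasing caches. The zero-fill half has a direct userspace analogue (flush_dcache_page() does not), sketched here with illustrative names:

    #include <string.h>
    #include <unistd.h>

    #define PAGE_SZ 4096

    /* Fill a page-sized buffer from fd; a short read leaves stale bytes
     * behind unless the tail is explicitly zeroed. */
    static ssize_t read_page(int fd, char *page)
    {
            ssize_t n = read(fd, page, PAGE_SZ);

            if (n < 0)
                    return n;
            if (n < PAGE_SZ)
                    memset(page + n, 0, PAGE_SZ - n);
            return n;
    }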
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9a8e396aed89..278fd2891288 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 	struct ceph_mds_reply_inode *ininfo;
 	struct ceph_vino vino;
 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
-	int i = 0;
 	int err = 0;
 
 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
@@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 		}
 	}
 
+	if (rinfo->head->is_target) {
+		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
+		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
+
+		in = ceph_get_inode(sb, vino);
+		if (IS_ERR(in)) {
+			err = PTR_ERR(in);
+			goto done;
+		}
+		req->r_target_inode = in;
+
+		err = fill_inode(in, &rinfo->targeti, NULL,
+				session, req->r_request_started,
+				(le32_to_cpu(rinfo->head->result) == 0) ?
+				req->r_fmode : -1,
+				&req->r_caps_reservation);
+		if (err < 0) {
+			pr_err("fill_inode badness %p %llx.%llx\n",
+				in, ceph_vinop(in));
+			goto done;
+		}
+	}
+
 	/*
 	 * ignore null lease/binding on snapdir ENOENT, or else we
 	 * will have trouble splicing in the virtual snapdir later
@@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 			ceph_dentry(req->r_old_dentry)->offset);
 
 		dn = req->r_old_dentry;  /* use old_dentry */
-		in = dn->d_inode;
 	}
 
 	/* null dentry? */
@@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 		}
 
 		/* attach proper inode */
-		ininfo = rinfo->targeti.in;
-		vino.ino = le64_to_cpu(ininfo->ino);
-		vino.snap = le64_to_cpu(ininfo->snapid);
-		in = dn->d_inode;
-		if (!in) {
-			in = ceph_get_inode(sb, vino);
-			if (IS_ERR(in)) {
-				pr_err("fill_trace bad get_inode "
-				       "%llx.%llx\n", vino.ino, vino.snap);
-				err = PTR_ERR(in);
-				d_drop(dn);
-				goto done;
-			}
+		if (!dn->d_inode) {
+			ihold(in);
 			dn = splice_dentry(dn, in, &have_lease, true);
 			if (IS_ERR(dn)) {
 				err = PTR_ERR(dn);
 				goto done;
 			}
 			req->r_dentry = dn;  /* may have spliced */
-			ihold(in);
-		} else if (ceph_ino(in) == vino.ino &&
-			   ceph_snap(in) == vino.snap) {
-			ihold(in);
-		} else {
+		} else if (dn->d_inode && dn->d_inode != in) {
 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
-			     dn, in, ceph_ino(in), ceph_snap(in),
-			     vino.ino, vino.snap);
+			     dn, dn->d_inode, ceph_vinop(dn->d_inode),
+			     ceph_vinop(in));
 			have_lease = false;
-			in = NULL;
 		}
 
 		if (have_lease)
 			update_dentry_lease(dn, rinfo->dlease, session,
 					    req->r_request_started);
 		dout(" final dn %p\n", dn);
-		i++;
-	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
-		    req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
+	} else if (!req->r_aborted &&
+		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
 		struct dentry *dn = req->r_dentry;
 
 		/* fill out a snapdir LOOKUPSNAP dentry */
@@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 		ininfo = rinfo->targeti.in;
 		vino.ino = le64_to_cpu(ininfo->ino);
 		vino.snap = le64_to_cpu(ininfo->snapid);
-		in = ceph_get_inode(sb, vino);
-		if (IS_ERR(in)) {
-			pr_err("fill_inode get_inode badness %llx.%llx\n",
-			       vino.ino, vino.snap);
-			err = PTR_ERR(in);
-			d_delete(dn);
-			goto done;
-		}
 		dout(" linking snapped dir %p to dn %p\n", in, dn);
+		ihold(in);
 		dn = splice_dentry(dn, in, NULL, true);
 		if (IS_ERR(dn)) {
 			err = PTR_ERR(dn);
 			goto done;
 		}
 		req->r_dentry = dn;  /* may have spliced */
-		ihold(in);
-		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
-	}
-
-	if (rinfo->head->is_target) {
-		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
-		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
-
-		if (in == NULL || ceph_ino(in) != vino.ino ||
-		    ceph_snap(in) != vino.snap) {
-			in = ceph_get_inode(sb, vino);
-			if (IS_ERR(in)) {
-				err = PTR_ERR(in);
-				goto done;
-			}
-		}
-		req->r_target_inode = in;
-
-		err = fill_inode(in,
-				 &rinfo->targeti, NULL,
-				 session, req->r_request_started,
-				 (le32_to_cpu(rinfo->head->result) == 0) ?
-				 req->r_fmode : -1,
-				 &req->r_caps_reservation);
-		if (err < 0) {
-			pr_err("fill_inode badness %p %llx.%llx\n",
-			       in, ceph_vinop(in));
-			goto done;
-		}
 	}
-
 done:
 	dout("fill_trace done err=%d\n", err);
 	return err;
@@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 	struct qstr dname;
 	struct dentry *dn;
 	struct inode *in;
-	int err = 0, i;
+	int err = 0, ret, i;
 	struct inode *snapdir = NULL;
 	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
 	struct ceph_dentry_info *di;
@@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 		ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
 	}
 
+	/* FIXME: release caps/leases if error occurs */
 	for (i = 0; i < rinfo->dir_nr; i++) {
 		struct ceph_vino vino;
 
@@ -1329,9 +1298,10 @@ retry_lookup:
 				err = -ENOMEM;
 				goto out;
 			}
-			err = ceph_init_dentry(dn);
-			if (err < 0) {
+			ret = ceph_init_dentry(dn);
+			if (ret < 0) {
 				dput(dn);
+				err = ret;
 				goto out;
 			}
 		} else if (dn->d_inode &&
@@ -1351,9 +1321,6 @@ retry_lookup:
 			spin_unlock(&parent->d_lock);
 		}
 
-		di = dn->d_fsdata;
-		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
-
 		/* inode */
 		if (dn->d_inode) {
 			in = dn->d_inode;
@@ -1366,26 +1333,39 @@ retry_lookup:
 				err = PTR_ERR(in);
 				goto out;
 			}
-			dn = splice_dentry(dn, in, NULL, false);
-			if (IS_ERR(dn))
-				dn = NULL;
 		}
 
 		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
 			       req->r_request_started, -1,
 			       &req->r_caps_reservation) < 0) {
 			pr_err("fill_inode badness on %p\n", in);
+			if (!dn->d_inode)
+				iput(in);
+			d_drop(dn);
 			goto next_item;
 		}
-		if (dn)
-			update_dentry_lease(dn, rinfo->dir_dlease[i],
-					    req->r_session,
-					    req->r_request_started);
+
+		if (!dn->d_inode) {
+			dn = splice_dentry(dn, in, NULL, false);
+			if (IS_ERR(dn)) {
+				err = PTR_ERR(dn);
+				dn = NULL;
+				goto next_item;
+			}
+		}
+
+		di = dn->d_fsdata;
+		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
+
+		update_dentry_lease(dn, rinfo->dir_dlease[i],
+				    req->r_session,
+				    req->r_request_started);
 next_item:
 		if (dn)
 			dput(dn);
 	}
-	req->r_did_prepopulate = true;
+	if (err == 0)
+		req->r_did_prepopulate = true;
 
 out:
 	if (snapdir) {
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b8e93a40a5d3..78c3c2097787 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi)
 	pstore_get_records(0);
 
 	kmsg_dump_register(&pstore_dumper);
-	pstore_register_console();
-	pstore_register_ftrace();
+
+	if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+		pstore_register_console();
+		pstore_register_ftrace();
+	}
 
 	if (pstore_update_ms >= 0) {
 		pstore_timer.expires = jiffies +
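The registration change gates the chatty console and ftrace frontends on a backend capability bit, so a backend that cannot absorb frequent writes still gets the panic-time kmsg dumper. A sketch of the gating (the flag value and struct are illustrative; only the flag name comes from the patch):

    #include <stdio.h>

    #define PSTORE_FLAGS_FRAGILE 1  /* illustrative value, not from the patch */

    struct backend {
            unsigned int flags;
    };

    /* Register heavyweight frontends only for backends that can take
     * frequent writes; "fragile" backends get the kmsg dumper alone. */
    static void register_frontends(struct backend *psi)
    {
            printf("kmsg dumper registered\n");
            if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
                    printf("console frontend registered\n");
                    printf("ftrace frontend registered\n");
            }
    }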
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b94f93685093..35e7d08fe629 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 	struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
 	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
 	struct sysfs_open_file *of;
-	bool has_read, has_write, has_mmap;
+	bool has_read, has_write;
 	int error = -EACCES;
 
 	/* need attr_sd for attr and ops, its parent for kobj */
@@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 
 		has_read = battr->read || battr->mmap;
 		has_write = battr->write || battr->mmap;
-		has_mmap = battr->mmap;
 	} else {
 		const struct sysfs_ops *ops = sysfs_file_ops(attr_sd);
 
@@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 
 		has_read = ops->show;
 		has_write = ops->store;
-		has_mmap = false;
 	}
 
 	/* check perms and supported operations */
@@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
 	 * open file has a separate mutex, it's okay as long as those don't
 	 * happen on the same file.  At this point, we can't easily give
 	 * each file a separate locking class.  Let's differentiate on
-	 * whether the file has mmap or not for now.
+	 * whether the file is bin or not for now.
 	 */
-	if (has_mmap)
+	if (sysfs_is_bin(attr_sd))
 		mutex_init(&of->mutex);
 	else
 		mutex_init(&of->mutex);
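The closing if/else looks like dead code, since both branches are mutex_init(&of->mutex), but it is deliberate: mutex_init() is a macro that plants a distinct static lockdep class key at each call site, so bin and non-bin files land in different lock classes even though the lines are textually identical. A self-contained sketch of that per-call-site trick (illustrative macro, not the kernel's):

    #include <stdio.h>

    /* Each expansion of INIT_CLASS() defines its own static key object,
     * so two textually identical calls still yield distinct identities,
     * the same trick mutex_init() uses for lockdep class keys. */
    #define INIT_CLASS(name_out) do {           \
            static char __key;                  \
            *(name_out) = &__key;               \
    } while (0)

    int main(void)
    {
            void *a, *b;

            INIT_CLASS(&a);         /* one call site, one key */
            INIT_CLASS(&b);         /* another call site, another key */
            printf("%s\n", a == b ? "same class" : "distinct classes");
            return 0;
    }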
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3ef11b22e750..3b2c14b6f0fb 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent(
  * blocks at the end of the file which do not start at the previous data block,
  * we will try to align the new blocks at stripe unit boundaries.
  *
- * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be
+ * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
  * at, or past the EOF.
  */
 STATIC int
@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof(
 	bma->aeof = 0;
 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
 				     &is_empty);
-	if (error || is_empty)
+	if (error)
 		return error;
 
+	if (is_empty) {
+		bma->aeof = 1;
+		return 0;
+	}
+
 	/*
 	 * Check if we are allocation or past the last extent, or at least into
 	 * the last delayed allocated extent.
@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc(
 	int		isaligned;
 	int		tryagain;
 	int		error;
+	int		stripe_align;
 
 	ASSERT(ap->length);
 
 	mp = ap->ip->i_mount;
+
+	/* stripe alignment for allocation is determined by mount parameters */
+	stripe_align = 0;
+	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+		stripe_align = mp->m_swidth;
+	else if (mp->m_dalign)
+		stripe_align = mp->m_dalign;
+
 	align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
 	if (unlikely(align)) {
 		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc(
 		ASSERT(!error);
 		ASSERT(ap->length);
 	}
+
+
 	nullfb = *ap->firstblock == NULLFSBLOCK;
 	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
 	if (nullfb) {
@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc(
 	 */
 	if (!ap->flist->xbf_low && ap->aeof) {
 		if (!ap->offset) {
-			args.alignment = mp->m_dalign;
+			args.alignment = stripe_align;
 			atype = args.type;
 			isaligned = 1;
 			/*
@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc(
 			 * of minlen+alignment+slop doesn't go up
 			 * between the calls.
 			 */
-			if (blen > mp->m_dalign && blen <= args.maxlen)
-				nextminlen = blen - mp->m_dalign;
+			if (blen > stripe_align && blen <= args.maxlen)
+				nextminlen = blen - stripe_align;
 			else
 				nextminlen = args.minlen;
-			if (nextminlen + mp->m_dalign > args.minlen + 1)
+			if (nextminlen + stripe_align > args.minlen + 1)
 				args.minalignslop =
-					nextminlen + mp->m_dalign -
+					nextminlen + stripe_align -
 					args.minlen - 1;
 			else
 				args.minalignslop = 0;
@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc(
 			 */
 			args.type = atype;
 			args.fsbno = ap->blkno;
-			args.alignment = mp->m_dalign;
+			args.alignment = stripe_align;
 			args.minlen = nextminlen;
 			args.minalignslop = 0;
 			isaligned = 1;
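The new stripe_align local centralizes the alignment choice that the later allocation logic then uses in place of the hard-coded mp->m_dalign: prefer the full stripe width when the swalloc mount option is set, otherwise fall back to the stripe unit. The selection, restated as a standalone function (the struct and flag value are stand-ins for xfs_mount):

    /* Pick the allocation alignment from mount geometry: stripe width when
     * the swalloc option asks for full-stripe allocation, else stripe unit. */
    struct mount_geom {
            unsigned int m_swidth;  /* stripe width, in blocks */
            unsigned int m_dalign;  /* stripe unit, in blocks */
            unsigned int m_flags;
    };
    #define XFS_MOUNT_SWALLOC 0x1   /* illustrative bit value */

    static unsigned int pick_stripe_align(const struct mount_geom *mp)
    {
            if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
                    return mp->m_swidth;
            if (mp->m_dalign)
                    return mp->m_dalign;
            return 0;
    }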
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5887e41c0323..1394106ed22d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes(
 		XFS_BUF_UNWRITE(bp);
 		XFS_BUF_READ(bp);
 		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
-		xfsbdstrat(mp, bp);
+
+		if (XFS_FORCED_SHUTDOWN(mp)) {
+			error = XFS_ERROR(EIO);
+			break;
+		}
+		xfs_buf_iorequest(bp);
 		error = xfs_buf_iowait(bp);
 		if (error) {
 			xfs_buf_ioerror_alert(bp,
@@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes(
 		XFS_BUF_UNDONE(bp);
 		XFS_BUF_UNREAD(bp);
 		XFS_BUF_WRITE(bp);
-		xfsbdstrat(mp, bp);
+
+		if (XFS_FORCED_SHUTDOWN(mp)) {
+			error = XFS_ERROR(EIO);
+			break;
+		}
+		xfs_buf_iorequest(bp);
 		error = xfs_buf_iowait(bp);
 		if (error) {
 			xfs_buf_ioerror_alert(bp,
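This is the first of several sites in the series that open-code the old xfsbdstrat() wrapper, which the xfs_buf.c hunks below delete: check for a forced shutdown before issuing I/O and fail with EIO instead of touching a dead device. The shape of these callers, reduced to a userspace sketch (submit_io is a stand-in for xfs_buf_iorequest(), not a real API):

    #include <errno.h>
    #include <stdbool.h>

    static bool fs_shut_down;   /* set once the filesystem is dead */

    /* Refuse to issue I/O after shutdown; callers see EIO instead of a
     * write landing on a device we no longer trust. */
    static int submit_or_fail(void (*submit_io)(void *), void *buf)
    {
            if (fs_shut_down)
                    return -EIO;
            submit_io(buf);
            return 0;
    }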
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c7f0b77dcb00..afe7645e4b2b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -698,7 +698,11 @@ xfs_buf_read_uncached(
 	bp->b_flags |= XBF_READ;
 	bp->b_ops = ops;
 
-	xfsbdstrat(target->bt_mount, bp);
+	if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
+		xfs_buf_relse(bp);
+		return NULL;
+	}
+	xfs_buf_iorequest(bp);
 	xfs_buf_iowait(bp);
 	return bp;
 }
@@ -1089,7 +1093,7 @@ xfs_bioerror(
  * This is meant for userdata errors; metadata bufs come with
  * iodone functions attached, so that we can track down errors.
  */
-STATIC int
+int
 xfs_bioerror_relse(
 	struct xfs_buf	*bp)
 {
@@ -1152,7 +1156,7 @@ xfs_bwrite(
 	ASSERT(xfs_buf_islocked(bp));
 
 	bp->b_flags |= XBF_WRITE;
-	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
 
 	xfs_bdstrat_cb(bp);
 
@@ -1164,25 +1168,6 @@ xfs_bwrite(
 	return error;
 }
 
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem.  Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
-	struct xfs_mount	*mp,
-	struct xfs_buf		*bp)
-{
-	if (XFS_FORCED_SHUTDOWN(mp)) {
-		trace_xfs_bdstrat_shut(bp, _RET_IP_);
-		xfs_bioerror_relse(bp);
-		return;
-	}
-
-	xfs_buf_iorequest(bp);
-}
-
 STATIC void
 _xfs_buf_ioend(
 	xfs_buf_t	*bp,
@@ -1516,6 +1501,12 @@ xfs_wait_buftarg(
 			struct xfs_buf *bp;
 			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
 			list_del_init(&bp->b_lru);
+			if (bp->b_flags & XBF_WRITE_FAIL) {
+				xfs_alert(btp->bt_mount,
+"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
+"Please run xfs_repair to determine the extent of the problem.",
+					(long long)bp->b_bn);
+			}
 			xfs_buf_rele(bp);
 		}
 		if (loop++ != 0)
@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit(
 
 	blk_start_plug(&plug);
 	list_for_each_entry_safe(bp, n, io_list, b_list) {
-		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
 		bp->b_flags |= XBF_WRITE;
 
 		if (!wait) {
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index e65683361017..1cf21a4a9f22 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -45,6 +45,7 @@ typedef enum {
 #define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
 #define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
 #define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */
 
 /* I/O hints for the BIO layer */
 #define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_ASYNC,		"ASYNC" }, \
 	{ XBF_DONE,		"DONE" }, \
 	{ XBF_STALE,		"STALE" }, \
+	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
 	{ XBF_SYNCIO,		"SYNCIO" }, \
 	{ XBF_FUA,		"FUA" }, \
 	{ XBF_FLUSH,		"FLUSH" }, \
@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t;
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
 	{ _XBF_COMPOUND,	"COMPOUND" }
 
+
 /*
  * Internal state flags.
  */
@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 
 /* Buffer Read and Write Routines */
 extern int xfs_bwrite(struct xfs_buf *bp);
-
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-
 extern void xfs_buf_ioend(xfs_buf_t *, int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
 extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
 #define xfs_buf_zero(bp, off, len) \
 	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
 
+extern int xfs_bioerror_relse(struct xfs_buf *);
+
 static inline int xfs_buf_geterror(xfs_buf_t *bp)
 {
 	return bp ? bp->b_error : ENOMEM;
@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void);
 
 #define XFS_BUF_ZEROFLAGS(bp) \
 	((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
-			    XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
+			    XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
+			    XBF_WRITE_FAIL))
 
 void xfs_buf_stale(struct xfs_buf *bp);
 #define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a64f67ba25d3..2227b9b050bb 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -496,6 +496,14 @@ xfs_buf_item_unpin(
 	}
 }
 
+/*
+ * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
+ * seconds so as to not spam logs too much on repeated detection of the same
+ * buffer being bad..
+ */
+
+DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
+
 STATIC uint
 xfs_buf_item_push(
 	struct xfs_log_item	*lip,
@@ -524,6 +532,14 @@ xfs_buf_item_push(
 
 	trace_xfs_buf_item_push(bip);
 
+	/* has a previous flush failed due to IO errors? */
+	if ((bp->b_flags & XBF_WRITE_FAIL) &&
+	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
+		xfs_warn(bp->b_target->bt_mount,
+"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+			 (long long)bp->b_bn);
+	}
+
 	if (!xfs_buf_delwri_queue(bp, buffer_list))
 		rval = XFS_ITEM_FLUSHING;
 	xfs_buf_unlock(bp);
@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks(
 
 	xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
 
-	if (!XFS_BUF_ISSTALE(bp)) {
-		bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
+	if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
+		bp->b_flags |= XBF_WRITE | XBF_ASYNC |
+			       XBF_DONE | XBF_WRITE_FAIL;
 		xfs_buf_iorequest(bp);
 	} else {
 		xfs_buf_relse(bp);
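DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10) allows at most ten warnings per 30-second window, so a buffer that keeps failing its async writes does not flood the log while the retry loop spins. The same window-plus-burst policy in a few lines of plain C (a simplification of the kernel's ___ratelimit(), with seconds instead of jiffies):

    #include <time.h>

    /* Allow at most 'burst' events per 'interval' seconds. */
    struct ratelimit {
            time_t begin;
            int interval;   /* seconds */
            int burst;
            int printed;
    };

    static int ratelimit_ok(struct ratelimit *rl)
    {
            time_t now = time(NULL);

            if (now - rl->begin >= rl->interval) {
                    rl->begin = now;        /* new window, reset the budget */
                    rl->printed = 0;
            }
            return rl->printed++ < rl->burst;
    }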
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 56369d4509d5..48c7d18f68c3 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup(
  */
 int						/* error */
 xfs_dir2_node_removename(
-	xfs_da_args_t		*args)		/* operation arguments */
+	struct xfs_da_args	*args)		/* operation arguments */
 {
-	xfs_da_state_blk_t	*blk;		/* leaf block */
+	struct xfs_da_state_blk	*blk;		/* leaf block */
 	int			error;		/* error return value */
 	int			rval;		/* operation return value */
-	xfs_da_state_t		*state;		/* btree cursor */
+	struct xfs_da_state	*state;		/* btree cursor */
 
 	trace_xfs_dir2_node_removename(args);
 
@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename(
 	state->mp = args->dp->i_mount;
 	state->blocksize = state->mp->m_dirblksize;
 	state->node_ents = state->mp->m_dir_node_ents;
-	/*
-	 * Look up the entry we're deleting, set up the cursor.
-	 */
+
+	/* Look up the entry we're deleting, set up the cursor. */
 	error = xfs_da3_node_lookup_int(state, &rval);
 	if (error)
-		rval = error;
-	/*
-	 * Didn't find it, upper layer screwed up.
-	 */
+		goto out_free;
+
+	/* Didn't find it, upper layer screwed up. */
 	if (rval != EEXIST) {
-		xfs_da_state_free(state);
-		return rval;
+		error = rval;
+		goto out_free;
 	}
+
 	blk = &state->path.blk[state->path.active - 1];
 	ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
 	ASSERT(state->extravalid);
@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename(
 	error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
 		&state->extrablk, &rval);
 	if (error)
-		return error;
+		goto out_free;
 	/*
 	 * Fix the hash values up the btree.
 	 */
@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename(
 	 */
 	if (!error)
 		error = xfs_dir2_node_to_leaf(state);
+out_free:
 	xfs_da_state_free(state);
 	return error;
 }
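The xfs_dir2_node_removename() rework converts three separate exit paths into the single-exit goto style, so xfs_da_state_free() runs exactly once no matter where the function bails out. The pattern in isolation (illustrative callbacks, not the XFS signatures):

    #include <stdlib.h>

    /* Single-exit error handling: every failure path funnels through
     * out_free, so the state is freed exactly once. */
    static int remove_entry(int (*lookup)(void *), int (*remove)(void *))
    {
            void *state = malloc(64);
            int error;

            if (!state)
                    return -1;

            error = lookup(state);
            if (error)
                    goto out_free;
            error = remove(state);
    out_free:
            free(state);
            return error;
    }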
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 27e0e544e963..104455b8046c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -618,7 +618,8 @@ xfs_setattr_nonsize(
 		}
 		if (!gid_eq(igid, gid)) {
 			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
-				ASSERT(!XFS_IS_PQUOTA_ON(mp));
+				ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
+				       !XFS_IS_PQUOTA_ON(mp));
 				ASSERT(mask & ATTR_GID);
 				ASSERT(gdqp);
 				olddquot2 = xfs_qm_vop_chown(tp, ip,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b6b669df40f3..eae16920655b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -193,7 +193,10 @@ xlog_bread_noalign(
 	bp->b_io_length = nbblks;
 	bp->b_error = 0;
 
-	xfsbdstrat(log->l_mp, bp);
+	if (XFS_FORCED_SHUTDOWN(log->l_mp))
+		return XFS_ERROR(EIO);
+
+	xfs_buf_iorequest(bp);
 	error = xfs_buf_iowait(bp);
 	if (error)
 		xfs_buf_ioerror_alert(bp, __func__);
@@ -4397,7 +4400,13 @@ xlog_do_recover(
 	XFS_BUF_READ(bp);
 	XFS_BUF_UNASYNC(bp);
 	bp->b_ops = &xfs_sb_buf_ops;
-	xfsbdstrat(log->l_mp, bp);
+
+	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
+		xfs_buf_relse(bp);
+		return XFS_ERROR(EIO);
+	}
+
+	xfs_buf_iorequest(bp);
 	error = xfs_buf_iowait(bp);
 	if (error) {
 		xfs_buf_ioerror_alert(bp, __func__);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 14a4996cfec6..dd88f0e27bd8 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -134,8 +134,6 @@ xfs_qm_dqpurge(
 {
 	struct xfs_mount	*mp = dqp->q_mount;
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
-	struct xfs_dquot	*gdqp = NULL;
-	struct xfs_dquot	*pdqp = NULL;
 
 	xfs_dqlock(dqp);
 	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
@@ -143,21 +141,6 @@ xfs_qm_dqpurge(
 		return EAGAIN;
 	}
 
-	/*
-	 * If this quota has a hint attached, prepare for releasing it now.
-	 */
-	gdqp = dqp->q_gdquot;
-	if (gdqp) {
-		xfs_dqlock(gdqp);
-		dqp->q_gdquot = NULL;
-	}
-
-	pdqp = dqp->q_pdquot;
-	if (pdqp) {
-		xfs_dqlock(pdqp);
-		dqp->q_pdquot = NULL;
-	}
-
 	dqp->dq_flags |= XFS_DQ_FREEING;
 
 	xfs_dqflock(dqp);
@@ -206,11 +189,47 @@ xfs_qm_dqpurge(
 	XFS_STATS_DEC(xs_qm_dquot_unused);
 
 	xfs_qm_dqdestroy(dqp);
+	return 0;
+}
+
+/*
+ * Release the group or project dquot pointers the user dquots maybe carrying
+ * around as a hint, and proceed to purge the user dquot cache if requested.
+ */
+STATIC int
+xfs_qm_dqpurge_hints(
+	struct xfs_dquot	*dqp,
+	void			*data)
+{
+	struct xfs_dquot	*gdqp = NULL;
+	struct xfs_dquot	*pdqp = NULL;
+	uint			flags = *((uint *)data);
+
+	xfs_dqlock(dqp);
+	if (dqp->dq_flags & XFS_DQ_FREEING) {
+		xfs_dqunlock(dqp);
+		return EAGAIN;
+	}
+
+	/* If this quota has a hint attached, prepare for releasing it now */
+	gdqp = dqp->q_gdquot;
+	if (gdqp)
+		dqp->q_gdquot = NULL;
+
+	pdqp = dqp->q_pdquot;
+	if (pdqp)
+		dqp->q_pdquot = NULL;
+
+	xfs_dqunlock(dqp);
 
 	if (gdqp)
-		xfs_qm_dqput(gdqp);
+		xfs_qm_dqrele(gdqp);
 	if (pdqp)
-		xfs_qm_dqput(pdqp);
+		xfs_qm_dqrele(pdqp);
+
+	if (flags & XFS_QMOPT_UQUOTA)
+		return xfs_qm_dqpurge(dqp, NULL);
+
 	return 0;
 }
 
@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all(
 	struct xfs_mount	*mp,
 	uint			flags)
 {
-	if (flags & XFS_QMOPT_UQUOTA)
-		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
+	/*
+	 * We have to release group/project dquot hint(s) from the user dquot
+	 * at first if they are there, otherwise we would run into an infinite
+	 * loop while walking through radix tree to purge other type of dquots
+	 * since their refcount is not zero if the user dquot refers to them
+	 * as hint.
+	 *
+	 * Call the special xfs_qm_dqpurge_hints() will end up go through the
+	 * general xfs_qm_dqpurge() against user dquot cache if requested.
+	 */
+	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
+
 	if (flags & XFS_QMOPT_GQUOTA)
 		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
 	if (flags & XFS_QMOPT_PQUOTA)
@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach(
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
-	if (udqp) {
+	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
 		ASSERT(ip->i_udquot == NULL);
-		ASSERT(XFS_IS_UQUOTA_ON(mp));
 		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
 
 		ip->i_udquot = xfs_qm_dqhold(udqp);
 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
-	if (gdqp) {
+	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
 		ASSERT(ip->i_gdquot == NULL);
-		ASSERT(XFS_IS_GQUOTA_ON(mp));
 		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
 		ip->i_gdquot = xfs_qm_dqhold(gdqp);
 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
-	if (pdqp) {
+	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
 		ASSERT(ip->i_pdquot == NULL);
-		ASSERT(XFS_IS_PQUOTA_ON(mp));
 		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
 
 		ip->i_pdquot = xfs_qm_dqhold(pdqp);
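The two-pass purge exists because user dquots hold counted hint references to group and project dquots: purging the group or project caches first would keep finding nonzero refcounts and spin forever. Dropping the hints (and purging the user cache) in one walk first breaks the cycle. The dependency, boiled down to a minimal sketch (illustrative types, not the dquot structures):

    /* Entries of type A hold counted hints to entries of type B; drop
     * the hints first or the B-cache purge never sees a zero refcount. */
    struct entry {
            struct entry *hint;     /* counted reference, may be NULL */
            int refcount;
    };

    static void put_entry(struct entry *e)
    {
            if (e)
                    e->refcount--;
    }

    static void drop_hint(struct entry *e)
    {
            struct entry *hint = e->hint;

            e->hint = NULL;
            put_entry(hint);        /* now the B-cache walk can free it */
    }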
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index c035d11b7734..647b6f1d8923 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -314,7 +314,18 @@ xfs_trans_read_buf_map(
 	ASSERT(bp->b_iodone == NULL);
 	XFS_BUF_READ(bp);
 	bp->b_ops = ops;
-	xfsbdstrat(tp->t_mountp, bp);
+
+	/*
+	 * XXX(hch): clean up the error handling here to be less
+	 * of a mess..
+	 */
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		trace_xfs_bdstrat_shut(bp, _RET_IP_);
+		xfs_bioerror_relse(bp);
+	} else {
+		xfs_buf_iorequest(bp);
+	}
+
 	error = xfs_buf_iowait(bp);
 	if (error) {
 		xfs_buf_ioerror_alert(bp, __func__);