Diffstat (limited to 'fs')

-rw-r--r--  fs/buffer.c            | 22
-rw-r--r--  fs/cifs/cifssmb.c      | 30
-rw-r--r--  fs/cifs/connect.c      | 18
-rw-r--r--  fs/cifs/readdir.c      |  7
-rw-r--r--  fs/cifs/transport.c    | 26
-rw-r--r--  fs/eventpoll.c         |  2
-rw-r--r--  fs/ext4/ioctl.c        |  1
-rw-r--r--  fs/fat/inode.c         | 13
-rw-r--r--  fs/fifo.c              |  9
-rw-r--r--  fs/locks.c             |  2
-rw-r--r--  fs/nfs/direct.c        |  6
-rw-r--r--  fs/nfs/super.c         |  2
-rw-r--r--  fs/ocfs2/file.c        |  2
-rw-r--r--  fs/ramfs/file-nommu.c  |  1
-rw-r--r--  fs/xfs/xfs_alloc.c     | 19
-rw-r--r--  fs/xfs/xfs_buf.c       | 53
-rw-r--r--  fs/xfs/xfs_buf.h       |  1
-rw-r--r--  fs/xfs/xfs_buf_item.c  |  2

18 files changed, 138 insertions(+), 78 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 838a9cf246bd..c7062c896d7c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1036,6 +1036,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
+	int ret;
+	struct buffer_head *bh;
+
 	/* Size must be multiple of hard sectorsize */
 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
 			(size < 512 || size > PAGE_SIZE))) {
@@ -1048,20 +1051,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 		return NULL;
 	}
 
-	for (;;) {
-		struct buffer_head * bh;
-		int ret;
+retry:
+	bh = __find_get_block(bdev, block, size);
+	if (bh)
+		return bh;
 
+	ret = grow_buffers(bdev, block, size);
+	if (ret == 0) {
+		free_more_memory();
+		goto retry;
+	} else if (ret > 0) {
 		bh = __find_get_block(bdev, block, size);
 		if (bh)
 			return bh;
-
-		ret = grow_buffers(bdev, block, size);
-		if (ret < 0)
-			return NULL;
-		if (ret == 0)
-			free_more_memory();
 	}
+	return NULL;
 }
 
 /*
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5b400730c213..4ee522b3f66f 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -86,7 +86,31 @@ static struct {
 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
 #endif /* CIFS_POSIX */
 
-/* Forward declarations */
+#ifdef CONFIG_HIGHMEM
+/*
+ * On arches that have high memory, kmap address space is limited. By
+ * serializing the kmap operations on those arches, we ensure that we don't
+ * end up with a bunch of threads in writeback with partially mapped page
+ * arrays, stuck waiting for kmap to come back. That situation prevents
+ * progress and can deadlock.
+ */
+static DEFINE_MUTEX(cifs_kmap_mutex);
+
+static inline void
+cifs_kmap_lock(void)
+{
+	mutex_lock(&cifs_kmap_mutex);
+}
+
+static inline void
+cifs_kmap_unlock(void)
+{
+	mutex_unlock(&cifs_kmap_mutex);
+}
+#else /* !CONFIG_HIGHMEM */
+#define cifs_kmap_lock() do { ; } while(0)
+#define cifs_kmap_unlock() do { ; } while(0)
+#endif /* CONFIG_HIGHMEM */
 
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
@@ -1503,7 +1527,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 	}
 
 	/* marshal up the page array */
+	cifs_kmap_lock();
 	len = rdata->marshal_iov(rdata, data_len);
+	cifs_kmap_unlock();
 	data_len -= len;
 
 	/* issue the read if we have any iovecs left to fill */
@@ -2069,7 +2095,9 @@ cifs_async_writev(struct cifs_writedata *wdata)
 	 * and set the iov_len properly for each one. It may also set
 	 * wdata->bytes too.
 	 */
+	cifs_kmap_lock();
 	wdata->marshal_iov(iov, wdata);
+	cifs_kmap_unlock();
 
 	cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
 
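To see why the marshaling step above needs serializing on HIGHMEM, here is a rough sketch of what a page-array marshal routine does; the helper name and loop are hypothetical and not taken from cifssmb.c, but the kmap() behaviour is the point:

#include <linux/highmem.h>
#include <linux/uio.h>

/*
 * Hypothetical illustration only: each page of a request is mapped with
 * kmap() and stays mapped until the I/O completes, so every concurrent
 * writer holds pkmap slots for the lifetime of its request.  On 32-bit
 * HIGHMEM machines the pkmap pool is small, so unserialized writers can
 * all block inside kmap() while holding partial mappings -- the deadlock
 * the cifs_kmap_mutex above is meant to avoid.
 */
static void example_marshal_pages(struct page **pages, unsigned int npages,
				  struct kvec *iov)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		iov[i].iov_base = kmap(pages[i]);	/* consumes a pkmap slot */
		iov[i].iov_len = PAGE_SIZE;
	}
}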
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0ae86ddf2213..94b7788c3189 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3445,6 +3445,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
 #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
 
+/*
+ * On hosts with high memory, we can't currently support wsize/rsize that are
+ * larger than we can kmap at once. Cap the rsize/wsize at
+ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
+ * larger than that anyway.
+ */
+#ifdef CONFIG_HIGHMEM
+#define CIFS_KMAP_SIZE_LIMIT	(LAST_PKMAP * PAGE_CACHE_SIZE)
+#else /* CONFIG_HIGHMEM */
+#define CIFS_KMAP_SIZE_LIMIT	(1<<24)
+#endif /* CONFIG_HIGHMEM */
+
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 {
@@ -3475,6 +3487,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 		wsize = min_t(unsigned int, wsize,
 				server->maxBuf - sizeof(WRITE_REQ) + 4);
 
+	/* limit to the amount that we can kmap at once */
+	wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
+
 	/* hard limit of CIFS_MAX_WSIZE */
 	wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
 
@@ -3516,6 +3531,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 	if (!(server->capabilities & CAP_LARGE_READ_X))
 		rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
 
+	/* limit to the amount that we can kmap at once */
+	rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
+
 	/* hard limit of CIFS_MAX_RSIZE */
 	rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
 
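For a rough sense of scale, assuming 32-bit x86 without PAE where LAST_PKMAP is typically 1024 and PAGE_CACHE_SIZE is 4096 bytes: the HIGHMEM cap works out to 1024 * 4096 = 4 MiB per request, while the non-HIGHMEM fallback of 1 << 24 bytes is 16 MiB.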
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 0a8224d1c4c5..a4217f02fab2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 
 	dentry = d_lookup(parent, name);
 	if (dentry) {
-		/* FIXME: check for inode number changes? */
-		if (dentry->d_inode != NULL)
+		inode = dentry->d_inode;
+		/* update inode in place if i_ino didn't change */
+		if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+			cifs_fattr_to_inode(inode, fattr);
 			return dentry;
+		}
 		d_drop(dentry);
 		dput(dentry);
 	}
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 3097ee58fd7d..f25d4ea14be4 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -365,16 +365,14 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
 	if (mid == NULL)
 		return -ENOMEM;
 
-	/* put it on the pending_mid_q */
-	spin_lock(&GlobalMid_Lock);
-	list_add_tail(&mid->qhead, &server->pending_mid_q);
-	spin_unlock(&GlobalMid_Lock);
-
 	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
-	if (rc)
-		delete_mid(mid);
+	if (rc) {
+		DeleteMidQEntry(mid);
+		return rc;
+	}
+
 	*ret_mid = mid;
-	return rc;
+	return 0;
 }
 
 /*
@@ -407,17 +405,21 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
 	mid->callback_data = cbdata;
 	mid->mid_state = MID_REQUEST_SUBMITTED;
 
+	/* put it on the pending_mid_q */
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&mid->qhead, &server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
+
+
 	cifs_in_send_inc(server);
 	rc = smb_sendv(server, iov, nvec);
 	cifs_in_send_dec(server);
 	cifs_save_when_sent(mid);
 	mutex_unlock(&server->srv_mutex);
 
-	if (rc)
-		goto out_err;
+	if (rc == 0)
+		return 0;
 
-	return rc;
-out_err:
 	delete_mid(mid);
 	add_credits(server, 1);
 	wake_up(&server->request_q);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 74598f67efeb..1c8b55670804 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1710,7 +1710,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 		goto error_tgt_fput;
 
 	/* Check if EPOLLWAKEUP is allowed */
-	if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP))
+	if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
 		epds.events &= ~EPOLLWAKEUP;
 
 	/*
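For context, a minimal user-space sketch of the flag being gated above; it assumes a kernel with EPOLLWAKEUP support and defines the flag by hand in case the libc headers predate it:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

#ifndef EPOLLWAKEUP
#define EPOLLWAKEUP (1u << 29)
#endif

int main(void)
{
	int epfd = epoll_create1(0);
	struct epoll_event ev = {
		/* request a wakeup source; silently downgraded by the kernel
		 * if the caller lacks CAP_BLOCK_SUSPEND (the capability this
		 * patch checks instead of CAP_EPOLLWAKEUP) */
		.events = EPOLLIN | EPOLLWAKEUP,
		.data.fd = STDIN_FILENO,
	};

	if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0) {
		perror("epoll");
		return 1;
	}
	close(epfd);
	return 0;
}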
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index e34deac3f366..6ec6f9ee2fec 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -268,7 +268,6 @@ group_extend_out:
 		err = ext4_move_extents(filp, donor_filp, me.orig_start,
 					me.donor_start, me.len, &me.moved_len);
 		mnt_drop_write_file(filp);
-		mnt_drop_write(filp->f_path.mnt);
 
 		if (copy_to_user((struct move_extent __user *)arg,
 				 &me, sizeof(me)))
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index a3d81ebf6d86..0038b32cb362 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -738,22 +738,21 @@ static int
 fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
 {
 	int len = *lenp;
-	u32 ipos_h, ipos_m, ipos_l;
+	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+	loff_t i_pos;
 
 	if (len < 5) {
 		*lenp = 5;
 		return 255; /* no room */
 	}
 
-	ipos_h = MSDOS_I(inode)->i_pos >> 8;
-	ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
-	ipos_l = (MSDOS_I(inode)->i_pos & 0x0f) << 28;
+	i_pos = fat_i_pos_read(sbi, inode);
 	*lenp = 5;
 	fh[0] = inode->i_ino;
 	fh[1] = inode->i_generation;
-	fh[2] = ipos_h;
-	fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
-	fh[4] = ipos_l;
+	fh[2] = i_pos >> 8;
+	fh[3] = ((i_pos & 0xf0) << 24) | MSDOS_I(inode)->i_logstart;
+	fh[4] = (i_pos & 0x0f) << 28;
 	if (parent)
 		fh[4] |= MSDOS_I(parent)->i_logstart;
 	return 3;
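As a sanity check on the packing above, a sketch of how a decoder would reassemble i_pos from the handle; this simply inverts the encode layout shown in the hunk and is written for illustration rather than copied from fat_decode_fh:

/*
 * Illustrative only: invert the fh[] packing from fat_encode_fh above.
 * fh[2] carries i_pos >> 8, the top nibble of fh[3] carries bits 4-7 of
 * i_pos, and the top nibble of fh[4] carries bits 0-3.  The low bits of
 * fh[3]/fh[4] hold the (parent) logstart and are masked away here.
 */
static inline loff_t example_fat_fh_to_i_pos(const __u32 *fh)
{
	loff_t i_pos = (loff_t)fh[2] << 8;

	i_pos |= ((fh[3] >> 24) & 0xf0) | (fh[4] >> 28);
	return i_pos;
}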
diff --git a/fs/fifo.c b/fs/fifo.c
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/pipe_fs_i.h>
 
-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
 {
 	int cur = *cnt;
 
@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
 		if (signal_pending(current))
 			break;
 	}
+	return cur == *cnt ? -ERESTARTSYS : 0;
 }
 
 static void wake_up_partner(struct inode* inode)
@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
 				 * seen a writer */
 				filp->f_version = pipe->w_counter;
 			} else {
-				wait_for_partner(inode, &pipe->w_counter);
-				if(signal_pending(current))
+				if (wait_for_partner(inode, &pipe->w_counter))
 					goto err_rd;
 			}
 		}
@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
 			wake_up_partner(inode);
 
 		if (!pipe->readers) {
-			wait_for_partner(inode, &pipe->r_counter);
-			if (signal_pending(current))
+			if (wait_for_partner(inode, &pipe->r_counter))
 				goto err_wr;
 		}
 		break;
diff --git a/fs/locks.c b/fs/locks.c
index 814c51d0de47..fce6238d52c1 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 	case F_WRLCK:
 		return generic_add_lease(filp, arg, flp);
 	default:
-		BUG();
+		return -EINVAL;
 	}
 }
 EXPORT_SYMBOL(generic_setlease);
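The user-visible effect of the change above: F_SETLEASE accepts only F_RDLCK, F_WRLCK, and F_UNLCK, and an out-of-range argument now comes back as EINVAL instead of hitting a kernel BUG(). A minimal user-space sketch (the file name and flow are arbitrary):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/lease-demo", O_RDONLY | O_CREAT, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* valid lease types: F_RDLCK, F_WRLCK, F_UNLCK */
	if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)
		perror("F_SETLEASE F_RDLCK");

	/* a bogus lease type is now rejected instead of oopsing the kernel */
	if (fcntl(fd, F_SETLEASE, 42) < 0 && errno == EINVAL)
		fprintf(stderr, "bogus lease type rejected with EINVAL\n");

	fcntl(fd, F_SETLEASE, F_UNLCK);
	close(fd);
	return 0;
}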
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 9a4cbfc85d81..48253372ab1d 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -484,6 +484,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 
 	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
 		if (!nfs_pageio_add_request(&desc, req)) {
+			nfs_list_remove_request(req);
 			nfs_list_add_request(req, &failed);
 			spin_lock(cinfo.lock);
 			dreq->flags = 0;
@@ -494,8 +495,11 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	}
 	nfs_pageio_complete(&desc);
 
-	while (!list_empty(&failed))
+	while (!list_empty(&failed)) {
+		req = nfs_list_entry(failed.next);
+		nfs_list_remove_request(req);
 		nfs_unlock_and_release_request(req);
+	}
 
 	if (put_dreq(dreq))
 		nfs_direct_write_complete(dreq, dreq->inode);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 906f09c7d842..06228192f64e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2860,6 +2860,8 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
 
 	dfprintk(MOUNT, "--> nfs4_try_mount()\n");
 
+	mount_info->fill_super = nfs4_fill_super;
+
 	export_path = data->nfs_server.export_path;
 	data->nfs_server.export_path = "/";
 	root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 98513c8ed589..7602783d7f41 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 	if (ret < 0)
 		mlog_errno(ret);
 
-	if (file->f_flags & O_SYNC)
+	if (file && (file->f_flags & O_SYNC))
 		handle->h_sync = 1;
 
 	ocfs2_commit_trans(osb, handle);
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index fbb0b478a346..d5378d028589 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
 		/* prevent the page from being discarded on memory pressure */
 		SetPageDirty(page);
+		SetPageUptodate(page);
 
 		unlock_page(page);
 		put_page(page);
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 9d1aeb7e2734..4f33c32affe3 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1074,13 +1074,13 @@ restart:
 	 * If we couldn't get anything, give up.
 	 */
 	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
+		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+
 		if (!forced++) {
 			trace_xfs_alloc_near_busy(args);
 			xfs_log_force(args->mp, XFS_LOG_SYNC);
 			goto restart;
 		}
-
-		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 		trace_xfs_alloc_size_neither(args);
 		args->agbno = NULLAGBLOCK;
 		return 0;
@@ -2434,13 +2434,22 @@ xfs_alloc_vextent_worker(
 	current_restore_flags_nested(&pflags, PF_FSTRANS);
 }
 
-
-int				/* error */
+/*
+ * Data allocation requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Metadata
+ * requests, OTOH, are generally from low stack usage paths, so avoid the
+ * context switch overhead here.
+ */
+int
 xfs_alloc_vextent(
-	xfs_alloc_arg_t	*args)	/* allocation argument structure */
+	struct xfs_alloc_arg	*args)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 
+	if (!args->userdata)
+		return __xfs_alloc_vextent(args);
+
+
 	args->done = &done;
 	INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
 	queue_work(xfs_alloc_wq, &args->work);
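The hunk above ends before the completion wait, so here is a self-contained sketch of the general pattern xfs_alloc_vextent relies on, with made-up names: queue a stack-allocated argument block on a workqueue and sleep until the worker signals completion.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative only; the struct, field, and function names are invented. */
struct demo_args {
	struct work_struct	work;
	struct completion	*done;
	int			result;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_args *args = container_of(work, struct demo_args, work);

	/* ... do the stack-hungry work here, on the worker's fresh stack ... */
	args->result = 0;
	complete(args->done);		/* wake the submitter */
}

static int demo_submit_and_wait(struct workqueue_struct *wq)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct demo_args args = { .done = &done };

	INIT_WORK_ONSTACK(&args.work, demo_worker);
	queue_work(wq, &args.work);
	/* args lives on this stack, so we must not return before completion */
	wait_for_completion(&done);
	return args.result;
}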
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a4beb421018a..269b35c084da 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -989,27 +989,6 @@ xfs_buf_ioerror_alert(
 		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
 }
 
-int
-xfs_bwrite(
-	struct xfs_buf		*bp)
-{
-	int			error;
-
-	ASSERT(xfs_buf_islocked(bp));
-
-	bp->b_flags |= XBF_WRITE;
-	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
-
-	xfs_bdstrat_cb(bp);
-
-	error = xfs_buf_iowait(bp);
-	if (error) {
-		xfs_force_shutdown(bp->b_target->bt_mount,
-				   SHUTDOWN_META_IO_ERROR);
-	}
-	return error;
-}
-
 /*
  * Called when we want to stop a buffer from getting written or read.
  * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
@@ -1079,14 +1058,7 @@ xfs_bioerror_relse(
 	return EIO;
 }
 
-
-/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
+STATIC int
 xfs_bdstrat_cb(
 	struct xfs_buf	*bp)
 {
@@ -1107,6 +1079,27 @@ xfs_bdstrat_cb(
 	return 0;
 }
 
+int
+xfs_bwrite(
+	struct xfs_buf		*bp)
+{
+	int			error;
+
+	ASSERT(xfs_buf_islocked(bp));
+
+	bp->b_flags |= XBF_WRITE;
+	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+
+	xfs_bdstrat_cb(bp);
+
+	error = xfs_buf_iowait(bp);
+	if (error) {
+		xfs_force_shutdown(bp->b_target->bt_mount,
+				   SHUTDOWN_META_IO_ERROR);
+	}
+	return error;
+}
+
 /*
  * Wrapper around bdstrat so that we can stop data from going to disk in case
  * we are shutting down the filesystem. Typically user data goes thru this
@@ -1243,7 +1236,7 @@ xfs_buf_iorequest(
 	 */
 	atomic_set(&bp->b_io_remaining, 1);
 	_xfs_buf_ioapply(bp);
-	_xfs_buf_ioend(bp, 0);
+	_xfs_buf_ioend(bp, 1);
 
 	xfs_buf_rele(bp);
 }
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 7f1d1392ce37..79344c48008e 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -180,7 +180,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
 extern int xfs_bwrite(struct xfs_buf *bp);
 
 extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
 
 extern void xfs_buf_ioend(xfs_buf_t *, int);
 extern void xfs_buf_ioerror(xfs_buf_t *, int);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 45df2b857d48..d9e451115f98 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -954,7 +954,7 @@ xfs_buf_iodone_callbacks(
 
 	if (!XFS_BUF_ISSTALE(bp)) {
 		bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
-		xfs_bdstrat_cb(bp);
+		xfs_buf_iorequest(bp);
 	} else {
 		xfs_buf_relse(bp);
 	}