Diffstat (limited to 'fs')
-rw-r--r--  fs/ext3/balloc.c                    |   5
-rw-r--r--  fs/file.c                           |   1
-rw-r--r--  fs/jffs2/file.c                     |  39
-rw-r--r--  fs/notify/fanotify/fanotify_user.c  |   3
-rw-r--r--  fs/proc/base.c                      | 109
-rw-r--r--  fs/pstore/platform.c                |   3
-rw-r--r--  fs/reiserfs/inode.c                 |  10
-rw-r--r--  fs/reiserfs/stree.c                 |   4
-rw-r--r--  fs/reiserfs/super.c                 |  60
-rw-r--r--  fs/ubifs/find.c                     |  12
-rw-r--r--  fs/ubifs/lprops.c                   |   6
-rw-r--r--  fs/ubifs/ubifs.h                    |   3
-rw-r--r--  fs/xfs/xfs_aops.c                   |  54
-rw-r--r--  fs/xfs/xfs_attr_leaf.c              |  20
-rw-r--r--  fs/xfs/xfs_buf.c                    |  14
15 files changed, 282 insertions, 61 deletions
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 7320a66e958f..22548f56197b 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -2101,8 +2101,9 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
 	minlen = range->minlen >> sb->s_blocksize_bits;
 
-	if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) ||
-	    unlikely(start >= max_blks))
+	if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
+	    start >= max_blks ||
+	    range->len < sb->s_blocksize)
 		return -EINVAL;
 	if (end >= max_blks)
 		end = max_blks - 1;
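Illustrative sketch (editorial, not from this commit): the extra test rejects FITRIM requests whose byte length is smaller than one filesystem block, since such a range cannot cover a single allocatable block. The same validation expressed as a standalone helper, with a hypothetical struct and names:

#include <errno.h>
#include <stdint.h>

struct trim_range { uint64_t start, len, minlen; };	/* byte units, like struct fstrim_range */

int check_trim_range(const struct trim_range *r, unsigned int blocksize_bits,
		     uint64_t blocks_per_group, uint64_t max_blks)
{
	uint64_t start  = r->start >> blocksize_bits;	/* bytes -> blocks */
	uint64_t minlen = r->minlen >> blocksize_bits;

	/* the same three rejections as the patched ext3_trim_fs() */
	if (minlen > blocks_per_group ||
	    start >= max_blks ||
	    r->len < (1ULL << blocksize_bits))
		return -EINVAL;
	return 0;
}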
diff --git a/fs/file.c b/fs/file.c
index 708d997a7748..7cb71b992603 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -685,7 +685,6 @@ void do_close_on_exec(struct files_struct *files)
 	struct fdtable *fdt;
 
 	/* exec unshares first */
-	BUG_ON(atomic_read(&files->count) != 1);
 	spin_lock(&files->file_lock);
 	for (i = 0; ; i++) {
 		unsigned long set;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 60ef3fb707ff..1506673c087e 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -138,33 +138,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 	struct page *pg;
 	struct inode *inode = mapping->host;
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+	struct jffs2_raw_inode ri;
+	uint32_t alloc_len = 0;
 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
 	int ret = 0;
 
+	jffs2_dbg(1, "%s()\n", __func__);
+
+	if (pageofs > inode->i_size) {
+		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+		if (ret)
+			return ret;
+	}
+
+	mutex_lock(&f->sem);
 	pg = grab_cache_page_write_begin(mapping, index, flags);
-	if (!pg)
+	if (!pg) {
+		if (alloc_len)
+			jffs2_complete_reservation(c);
+		mutex_unlock(&f->sem);
 		return -ENOMEM;
+	}
 	*pagep = pg;
 
-	jffs2_dbg(1, "%s()\n", __func__);
-
-	if (pageofs > inode->i_size) {
+	if (alloc_len) {
 		/* Make new hole frag from old EOF to new page */
-		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
-		struct jffs2_raw_inode ri;
 		struct jffs2_full_dnode *fn;
-		uint32_t alloc_len;
 
 		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
 			  (unsigned int)inode->i_size, pageofs);
 
-		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
-					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
-		if (ret)
-			goto out_page;
-
-		mutex_lock(&f->sem);
 		memset(&ri, 0, sizeof(ri));
 
 		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -191,7 +197,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 		if (IS_ERR(fn)) {
 			ret = PTR_ERR(fn);
 			jffs2_complete_reservation(c);
-			mutex_unlock(&f->sem);
 			goto out_page;
 		}
 		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -206,12 +211,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 			jffs2_mark_node_obsolete(c, fn->raw);
 			jffs2_free_full_dnode(fn);
 			jffs2_complete_reservation(c);
-			mutex_unlock(&f->sem);
 			goto out_page;
 		}
 		jffs2_complete_reservation(c);
 		inode->i_size = pageofs;
-		mutex_unlock(&f->sem);
 	}
 
 	/*
@@ -220,18 +223,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
 	 * case of a short-copy.
 	 */
 	if (!PageUptodate(pg)) {
-		mutex_lock(&f->sem);
 		ret = jffs2_do_readpage_nolock(inode, pg);
-		mutex_unlock(&f->sem);
 		if (ret)
 			goto out_page;
 	}
+	mutex_unlock(&f->sem);
 	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
 	return ret;
 
 out_page:
 	unlock_page(pg);
 	page_cache_release(pg);
+	mutex_unlock(&f->sem);
 	return ret;
 }
 
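Illustrative sketch (editorial, not from this commit): the rework above is a lock-ordering fix - jffs2_reserve_space() may block for garbage collection, so it is now called before f->sem and the page lock are taken, and every error path releases whatever was already acquired. A minimal standalone illustration of the same ordering rule, using pthread mutexes and hypothetical names rather than JFFS2 internals:

#include <pthread.h>

static pthread_mutex_t alloc_sem = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of c->alloc_sem */
static pthread_mutex_t inode_sem = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of f->sem */

static int reserve_space(unsigned int len)
{
	(void)len;
	pthread_mutex_lock(&alloc_sem);		/* may block for a long time (GC) */
	/* ... find room for 'len' bytes ... */
	pthread_mutex_unlock(&alloc_sem);
	return 0;
}

int write_begin(unsigned int len, int need_hole)
{
	int ret = 0;

	if (need_hole) {
		ret = reserve_space(len);	/* step 1: allocate with no locks held */
		if (ret)
			return ret;
	}
	pthread_mutex_lock(&inode_sem);		/* step 2: then the per-inode lock */
	/* step 3: then lock the page and write the hole frag */
	pthread_mutex_unlock(&inode_sem);
	return ret;
}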
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 721d692fa8d4..6fcaeb8c902e 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -258,7 +258,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 	if (ret)
 		goto out_close_fd;
 
-	fd_install(fd, f);
+	if (fd != FAN_NOFD)
+		fd_install(fd, f);
 	return fanotify_event_metadata.event_len;
 
 out_close_fd:
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 144a96732dd7..3c231adf8450 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -873,6 +873,113 @@ static const struct file_operations proc_environ_operations = {
 	.release	= mem_release,
 };
 
+static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
+			    loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+	char buffer[PROC_NUMBUF];
+	int oom_adj = OOM_ADJUST_MIN;
+	size_t len;
+	unsigned long flags;
+
+	if (!task)
+		return -ESRCH;
+	if (lock_task_sighand(task, &flags)) {
+		if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
+			oom_adj = OOM_ADJUST_MAX;
+		else
+			oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
+				  OOM_SCORE_ADJ_MAX;
+		unlock_task_sighand(task, &flags);
+	}
+	put_task_struct(task);
+	len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
+	return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t oom_adj_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct task_struct *task;
+	char buffer[PROC_NUMBUF];
+	int oom_adj;
+	unsigned long flags;
+	int err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &oom_adj);
+	if (err)
+		goto out;
+	if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
+	     oom_adj != OOM_DISABLE) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	task = get_proc_task(file->f_path.dentry->d_inode);
+	if (!task) {
+		err = -ESRCH;
+		goto out;
+	}
+
+	task_lock(task);
+	if (!task->mm) {
+		err = -EINVAL;
+		goto err_task_lock;
+	}
+
+	if (!lock_task_sighand(task, &flags)) {
+		err = -ESRCH;
+		goto err_task_lock;
+	}
+
+	/*
+	 * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
+	 * value is always attainable.
+	 */
+	if (oom_adj == OOM_ADJUST_MAX)
+		oom_adj = OOM_SCORE_ADJ_MAX;
+	else
+		oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+
+	if (oom_adj < task->signal->oom_score_adj &&
+	    !capable(CAP_SYS_RESOURCE)) {
+		err = -EACCES;
+		goto err_sighand;
+	}
+
+	/*
+	 * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
+	 * /proc/pid/oom_score_adj instead.
+	 */
+	printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+		    current->comm, task_pid_nr(current), task_pid_nr(task),
+		    task_pid_nr(task));
+
+	task->signal->oom_score_adj = oom_adj;
+	trace_oom_score_adj_update(task);
+err_sighand:
+	unlock_task_sighand(task, &flags);
+err_task_lock:
+	task_unlock(task);
+	put_task_struct(task);
+out:
+	return err < 0 ? err : count;
+}
+
+static const struct file_operations proc_oom_adj_operations = {
+	.read		= oom_adj_read,
+	.write		= oom_adj_write,
+	.llseek		= generic_file_llseek,
+};
+
 static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
 					size_t count, loff_t *ppos)
 {
@@ -2598,6 +2705,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 	REG("cgroup", S_IRUGO, proc_cgroup_operations),
 #endif
 	INF("oom_score", S_IRUGO, proc_oom_score),
+	REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
 	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
@@ -2964,6 +3072,7 @@ static const struct pid_entry tid_base_stuff[] = {
 	REG("cgroup", S_IRUGO, proc_cgroup_operations),
 #endif
 	INF("oom_score", S_IRUGO, proc_oom_score),
+	REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
 	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
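Illustrative sketch (editorial, not from this commit): the restored /proc/<pid>/oom_adj files translate between the legacy [-17, 15] range and oom_score_adj's [-1000, 1000] range using the linear scaling visible in oom_adj_read() and oom_adj_write(). A small user-space program showing that arithmetic (constants copied from the hunk, everything else hypothetical):

#include <stdio.h>

#define OOM_DISABLE		(-17)
#define OOM_ADJUST_MAX		15
#define OOM_SCORE_ADJ_MAX	1000

static int adj_to_score_adj(int oom_adj)
{
	if (oom_adj == OOM_ADJUST_MAX)
		return OOM_SCORE_ADJ_MAX;	/* keep the maximum reachable */
	return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

static int score_adj_to_adj(int score_adj)
{
	if (score_adj == OOM_SCORE_ADJ_MAX)
		return OOM_ADJUST_MAX;
	return (score_adj * -OOM_DISABLE) / OOM_SCORE_ADJ_MAX;
}

int main(void)
{
	for (int adj = OOM_DISABLE; adj <= OOM_ADJUST_MAX; adj++)
		printf("oom_adj %3d -> oom_score_adj %5d -> oom_adj %3d\n",
		       adj, adj_to_score_adj(adj),
		       score_adj_to_adj(adj_to_score_adj(adj)));
	return 0;
}

Because of integer division the round trip is not exact for every value, which is why the end points are special-cased so that OOM_ADJUST_MAX and OOM_DISABLE stay reachable.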
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index a40da07e93d6..947fbe06c3b1 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -161,6 +161,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
 
 	while (s < e) {
 		unsigned long flags;
+		u64 id;
 
 		if (c > psinfo->bufsize)
 			c = psinfo->bufsize;
@@ -172,7 +173,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
 			spin_lock_irqsave(&psinfo->buf_lock, flags);
 		}
 		memcpy(psinfo->buf, s, c);
-		psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo);
+		psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, c, psinfo);
 		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
 		s += c;
 		c = e - s;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index f27f01a98aa2..d83736fbc26c 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1782,8 +1782,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 
 	BUG_ON(!th->t_trans_id);
 
-	dquot_initialize(inode);
+	reiserfs_write_unlock(inode->i_sb);
 	err = dquot_alloc_inode(inode);
+	reiserfs_write_lock(inode->i_sb);
 	if (err)
 		goto out_end_trans;
 	if (!dir->i_nlink) {
@@ -1979,8 +1980,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 
       out_end_trans:
 	journal_end(th, th->t_super, th->t_blocks_allocated);
+	reiserfs_write_unlock(inode->i_sb);
 	/* Drop can be outside and it needs more credits so it's better to have it outside */
 	dquot_drop(inode);
+	reiserfs_write_lock(inode->i_sb);
 	inode->i_flags |= S_NOQUOTA;
 	make_bad_inode(inode);
 
@@ -3103,10 +3106,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
 	/* must be turned off for recursive notify_change calls */
 	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
-	depth = reiserfs_write_lock_once(inode->i_sb);
 	if (is_quota_modification(inode, attr))
 		dquot_initialize(inode);
-
+	depth = reiserfs_write_lock_once(inode->i_sb);
 	if (attr->ia_valid & ATTR_SIZE) {
 		/* version 2 items will be caught by the s_maxbytes check
 		** done for us in vmtruncate
@@ -3170,7 +3172,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
 		error = journal_begin(&th, inode->i_sb, jbegin_count);
 		if (error)
 			goto out;
+		reiserfs_write_unlock_once(inode->i_sb, depth);
 		error = dquot_transfer(inode, attr);
+		depth = reiserfs_write_lock_once(inode->i_sb);
 		if (error) {
 			journal_end(&th, inode->i_sb, jbegin_count);
 			goto out;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index f8afa4b162b8..2f40a4c70a4d 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
 		       key2type(&(key->on_disk_key)));
 #endif
 
+	reiserfs_write_unlock(inode->i_sb);
 	retval = dquot_alloc_space_nodirty(inode, pasted_size);
+	reiserfs_write_lock(inode->i_sb);
 	if (retval) {
 		pathrelse(search_path);
 		return retval;
@@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
 		       "reiserquota insert_item(): allocating %u id=%u type=%c",
 		       quota_bytes, inode->i_uid, head2type(ih));
 #endif
+	reiserfs_write_unlock(inode->i_sb);
 	/* We can't dirty inode here. It would be immediately written but
 	 * appropriate stat item isn't inserted yet... */
 	retval = dquot_alloc_space_nodirty(inode, quota_bytes);
+	reiserfs_write_lock(inode->i_sb);
 	if (retval) {
 		pathrelse(path);
 		return retval;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 1078ae179993..418bdc3a57da 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -298,7 +298,9 @@ static int finish_unfinished(struct super_block *s)
 			retval = remove_save_link_only(s, &save_link_key, 0);
 			continue;
 		}
+		reiserfs_write_unlock(s);
 		dquot_initialize(inode);
+		reiserfs_write_lock(s);
 
 		if (truncate && S_ISDIR(inode->i_mode)) {
 			/* We got a truncate request for a dir which is impossible.
@@ -1335,7 +1337,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 			kfree(qf_names[i]);
 #endif
 		err = -EINVAL;
-		goto out_err;
+		goto out_unlock;
 	}
 #ifdef CONFIG_QUOTA
 	handle_quota_files(s, qf_names, &qfmt);
@@ -1379,7 +1381,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 	if (blocks) {
 		err = reiserfs_resize(s, blocks);
 		if (err != 0)
-			goto out_err;
+			goto out_unlock;
 	}
 
 	if (*mount_flags & MS_RDONLY) {
@@ -1389,9 +1391,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 			/* it is read-only already */
 			goto out_ok;
 
+		/*
+		 * Drop write lock. Quota will retake it when needed and lock
+		 * ordering requires calling dquot_suspend() without it.
+		 */
+		reiserfs_write_unlock(s);
 		err = dquot_suspend(s, -1);
 		if (err < 0)
 			goto out_err;
+		reiserfs_write_lock(s);
 
 		/* try to remount file system with read-only permissions */
 		if (sb_umount_state(rs) == REISERFS_VALID_FS
@@ -1401,7 +1409,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 
 		err = journal_begin(&th, s, 10);
 		if (err)
-			goto out_err;
+			goto out_unlock;
 
 		/* Mounting a rw partition read-only. */
 		reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1416,7 +1424,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 
 		if (reiserfs_is_journal_aborted(journal)) {
 			err = journal->j_errno;
-			goto out_err;
+			goto out_unlock;
 		}
 
 		handle_data_mode(s, mount_options);
@@ -1425,7 +1433,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 		s->s_flags &= ~MS_RDONLY;	/* now it is safe to call journal_begin */
 		err = journal_begin(&th, s, 10);
 		if (err)
-			goto out_err;
+			goto out_unlock;
 
 		/* Mount a partition which is read-only, read-write */
 		reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1442,10 +1450,16 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 	SB_JOURNAL(s)->j_must_wait = 1;
 	err = journal_end(&th, s, 10);
 	if (err)
-		goto out_err;
+		goto out_unlock;
 
 	if (!(*mount_flags & MS_RDONLY)) {
+		/*
+		 * Drop write lock. Quota will retake it when needed and lock
+		 * ordering requires calling dquot_resume() without it.
+		 */
+		reiserfs_write_unlock(s);
 		dquot_resume(s, -1);
+		reiserfs_write_lock(s);
 		finish_unfinished(s);
 		reiserfs_xattr_init(s, *mount_flags);
 	}
@@ -1455,9 +1469,10 @@ out_ok:
 	reiserfs_write_unlock(s);
 	return 0;
 
+out_unlock:
+	reiserfs_write_unlock(s);
 out_err:
 	kfree(new_opts);
-	reiserfs_write_unlock(s);
 	return err;
 }
 
@@ -2095,13 +2110,15 @@ static int reiserfs_write_dquot(struct dquot *dquot)
 			  REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
 	if (ret)
 		goto out;
+	reiserfs_write_unlock(dquot->dq_sb);
 	ret = dquot_commit(dquot);
+	reiserfs_write_lock(dquot->dq_sb);
 	err =
 	    journal_end(&th, dquot->dq_sb,
 			REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
 	if (!ret && err)
 		ret = err;
-      out:
+out:
 	reiserfs_write_unlock(dquot->dq_sb);
 	return ret;
 }
@@ -2117,13 +2134,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
 			  REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
 	if (ret)
 		goto out;
+	reiserfs_write_unlock(dquot->dq_sb);
 	ret = dquot_acquire(dquot);
+	reiserfs_write_lock(dquot->dq_sb);
 	err =
 	    journal_end(&th, dquot->dq_sb,
 			REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
 	if (!ret && err)
 		ret = err;
-      out:
+out:
 	reiserfs_write_unlock(dquot->dq_sb);
 	return ret;
 }
@@ -2137,19 +2156,21 @@ static int reiserfs_release_dquot(struct dquot *dquot)
 	ret =
 	    journal_begin(&th, dquot->dq_sb,
 			  REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+	reiserfs_write_unlock(dquot->dq_sb);
 	if (ret) {
 		/* Release dquot anyway to avoid endless cycle in dqput() */
 		dquot_release(dquot);
 		goto out;
 	}
 	ret = dquot_release(dquot);
+	reiserfs_write_lock(dquot->dq_sb);
 	err =
 	    journal_end(&th, dquot->dq_sb,
 			REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
 	if (!ret && err)
 		ret = err;
-      out:
 	reiserfs_write_unlock(dquot->dq_sb);
+out:
 	return ret;
 }
 
@@ -2174,11 +2195,13 @@ static int reiserfs_write_info(struct super_block *sb, int type)
 	ret = journal_begin(&th, sb, 2);
 	if (ret)
 		goto out;
+	reiserfs_write_unlock(sb);
 	ret = dquot_commit_info(sb, type);
+	reiserfs_write_lock(sb);
 	err = journal_end(&th, sb, 2);
 	if (!ret && err)
 		ret = err;
-      out:
+out:
 	reiserfs_write_unlock(sb);
 	return ret;
 }
@@ -2203,8 +2226,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
 	struct reiserfs_transaction_handle th;
 	int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA;
 
-	if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt)))
-		return -EINVAL;
+	reiserfs_write_lock(sb);
+	if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	/* Quotafile not on the same filesystem? */
 	if (path->dentry->d_sb != sb) {
@@ -2246,8 +2272,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
 		if (err)
 			goto out;
 	}
-	err = dquot_quota_on(sb, type, format_id, path);
+	reiserfs_write_unlock(sb);
+	return dquot_quota_on(sb, type, format_id, path);
 out:
+	reiserfs_write_unlock(sb);
 	return err;
 }
 
@@ -2320,7 +2348,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
 		tocopy = sb->s_blocksize - offset < towrite ?
 		    sb->s_blocksize - offset : towrite;
 		tmp_bh.b_state = 0;
+		reiserfs_write_lock(sb);
 		err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
+		reiserfs_write_unlock(sb);
 		if (err)
 			goto out;
 		if (offset || tocopy != sb->s_blocksize)
@@ -2336,10 +2366,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
 		flush_dcache_page(bh->b_page);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
+		reiserfs_write_lock(sb);
 		reiserfs_prepare_for_journal(sb, bh, 1);
 		journal_mark_dirty(current->journal_info, sb, bh);
 		if (!journal_quota)
 			reiserfs_add_ordered_list(inode, bh);
+		reiserfs_write_unlock(sb);
 		brelse(bh);
 		offset = 0;
 		towrite -= tocopy;
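Illustrative sketch (editorial, not from this commit): every reiserfs hunk above applies one pattern - the coarse per-superblock write lock is dropped before calling into the quota code and re-taken afterwards, so the quota layer's own locks are never acquired underneath it. A standalone illustration of the pattern with pthread mutexes and hypothetical names:

#include <pthread.h>

static pthread_mutex_t fs_write_lock = PTHREAD_MUTEX_INITIALIZER;	/* like the reiserfs write lock */
static pthread_mutex_t quota_lock    = PTHREAD_MUTEX_INITIALIZER;	/* like the quota code's own mutex */

static int quota_commit(void)
{
	pthread_mutex_lock(&quota_lock);
	/* the quota code may itself re-take fs_write_lock when it touches the
	 * filesystem, which is why the caller must not be holding it here */
	pthread_mutex_unlock(&quota_lock);
	return 0;
}

int write_dquot(void)
{
	int ret;

	pthread_mutex_lock(&fs_write_lock);
	/* a journal_begin() equivalent would run here, under the write lock */
	pthread_mutex_unlock(&fs_write_lock);	/* drop it before calling into quota */

	ret = quota_commit();

	pthread_mutex_lock(&fs_write_lock);	/* re-take it for the journal_end() equivalent */
	pthread_mutex_unlock(&fs_write_lock);
	return ret;
}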
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 28ec13af28d9..2dcf3d473fec 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c)
 	if (!lprops) {
 		lprops = ubifs_fast_find_freeable(c);
 		if (!lprops) {
-			ubifs_assert(c->freeable_cnt == 0);
-			if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
+			/*
+			 * The first condition means the following: go scan the
+			 * LPT if there are uncategorized lprops, which means
+			 * there may be freeable LEBs there (UBIFS does not
+			 * store the information about freeable LEBs in the
+			 * master node).
+			 */
+			if (c->in_a_category_cnt != c->main_lebs ||
+			    c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
+				ubifs_assert(c->freeable_cnt == 0);
 				lprops = scan_for_leb_for_idx(c);
 				if (IS_ERR(lprops)) {
 					err = PTR_ERR(lprops);
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index e5a2a35a46dc..46190a7c42a6 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
 	default:
 		ubifs_assert(0);
 	}
+
 	lprops->flags &= ~LPROPS_CAT_MASK;
 	lprops->flags |= cat;
+	c->in_a_category_cnt += 1;
+	ubifs_assert(c->in_a_category_cnt <= c->main_lebs);
 }
 
 /**
@@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c,
 	default:
 		ubifs_assert(0);
 	}
+
+	c->in_a_category_cnt -= 1;
+	ubifs_assert(c->in_a_category_cnt >= 0);
 }
 
 /**
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 5486346d0a3f..d133c276fe05 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1183,6 +1183,8 @@ struct ubifs_debug_info;
  * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size)
  * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size)
  * @freeable_cnt: number of freeable LEBs in @freeable_list
+ * @in_a_category_cnt: count of lprops which are in a certain category, which
+ *                     basically meants that they were loaded from the flash
  *
  * @ltab_lnum: LEB number of LPT's own lprops table
  * @ltab_offs: offset of LPT's own lprops table
@@ -1412,6 +1414,7 @@ struct ubifs_info {
 	struct list_head freeable_list;
 	struct list_head frdi_idx_list;
 	int freeable_cnt;
+	int in_a_category_cnt;
 
 	int ltab_lnum;
 	int ltab_offs;
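Illustrative sketch (editorial, not from this commit): the three UBIFS hunks add a counter of categorized lprops so ubifs_find_free_leb_for_idx() can cheaply tell whether the LPT may still hold uncategorized (possibly freeable) LEBs. The bookkeeping, reduced to a standalone form with hypothetical structures:

#include <assert.h>

struct cat_state {
	int main_lebs;			/* total number of main-area LEBs */
	int in_a_category_cnt;		/* how many of them are currently categorized */
};

void add_to_cat(struct cat_state *s)
{
	s->in_a_category_cnt += 1;
	assert(s->in_a_category_cnt <= s->main_lebs);
}

void remove_from_cat(struct cat_state *s)
{
	s->in_a_category_cnt -= 1;
	assert(s->in_a_category_cnt >= 0);
}

/* the new test in find.c: scan the LPT only while some lprops are still
 * uncategorized, or while spare empty LEBs are available */
int should_scan_lpt(const struct cat_state *s, int spare_empty_lebs)
{
	return s->in_a_category_cnt != s->main_lebs || spare_empty_lebs > 0;
}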
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index e562dd43f41f..e57e2daa357c 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -481,11 +481,17 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  *
  * The fix is two passes across the ioend list - one to start writeback on the
  * buffer_heads, and then submit them for I/O on the second pass.
+ *
+ * If @fail is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked paged for writeback
+ * and unlocked them. In this situation, we need to fail the ioend chain rather
+ * than submit it to IO. This typically only happens on a filesystem shutdown.
  */
 STATIC void
 xfs_submit_ioend(
 	struct writeback_control *wbc,
-	xfs_ioend_t		*ioend)
+	xfs_ioend_t		*ioend,
+	int			fail)
 {
 	xfs_ioend_t		*head = ioend;
 	xfs_ioend_t		*next;
@@ -506,6 +512,18 @@ xfs_submit_ioend(
 		next = ioend->io_list;
 		bio = NULL;
 
+		/*
+		 * If we are failing the IO now, just mark the ioend with an
+		 * error and finish it. This will run IO completion immediately
+		 * as there is only one reference to the ioend at this point in
+		 * time.
+		 */
+		if (fail) {
+			ioend->io_error = -fail;
+			xfs_finish_ioend(ioend);
+			continue;
+		}
+
 		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
 
 			if (!bio) {
@@ -1060,7 +1078,18 @@ xfs_vm_writepage(
 
 	xfs_start_page_writeback(page, 1, count);
 
-	if (ioend && imap_valid) {
+	/* if there is no IO to be submitted for this page, we are done */
+	if (!ioend)
+		return 0;
+
+	ASSERT(iohead);
+
+	/*
+	 * Any errors from this point onwards need tobe reported through the IO
+	 * completion path as we have marked the initial page as under writeback
+	 * and unlocked it.
+	 */
+	if (imap_valid) {
 		xfs_off_t		end_index;
 
 		end_index = imap.br_startoff + imap.br_blockcount;
@@ -1079,20 +1108,15 @@ xfs_vm_writepage(
 					  wbc, end_index);
 		}
 
-	if (iohead) {
-		/*
-		 * Reserve log space if we might write beyond the on-disk
-		 * inode size.
-		 */
-		if (ioend->io_type != XFS_IO_UNWRITTEN &&
-		    xfs_ioend_is_append(ioend)) {
-			err = xfs_setfilesize_trans_alloc(ioend);
-			if (err)
-				goto error;
-		}
 
-		xfs_submit_ioend(wbc, iohead);
-	}
+	/*
+	 * Reserve log space if we might write beyond the on-disk inode size.
+	 */
+	err = 0;
+	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
+		err = xfs_setfilesize_trans_alloc(ioend);
+
+	xfs_submit_ioend(wbc, iohead, err);
 
 	return 0;
 
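Illustrative sketch (editorial, not from this commit): once xfs_vm_writepage() has marked pages for writeback and unlocked them, it can no longer return an error to its caller, so the new fail argument makes xfs_submit_ioend() complete each pending ioend with the error instead of building and submitting bios for it. The failure path in isolation, with hypothetical types:

#include <stddef.h>

struct ioend {
	struct ioend *next;
	int error;
};

void finish_ioend(struct ioend *io) { (void)io; /* would run I/O completion */ }
void submit_ioend_bios(struct ioend *io) { (void)io; /* would build and submit bios */ }

void submit_ioend_chain(struct ioend *head, int fail)
{
	struct ioend *io, *next;

	for (io = head; io; io = next) {
		next = io->next;
		if (fail) {
			io->error = -fail;	/* report through the completion path */
			finish_ioend(io);
			continue;
		}
		submit_ioend_bios(io);
	}
}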
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index d330111ca738..70eec1829776 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -1291,6 +1291,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	leaf2 = blk2->bp->b_addr;
 	ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
 	ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+	ASSERT(leaf2->hdr.count == 0);
 	args = state->args;
 
 	trace_xfs_attr_leaf_rebalance(args);
@@ -1361,6 +1362,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		 * I assert that since all callers pass in an empty
 		 * second buffer, this code should never execute.
 		 */
+		ASSERT(0);
 
 		/*
 		 * Figure the total bytes to be added to the destination leaf.
@@ -1422,10 +1424,24 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 			args->index2 = 0;
 			args->blkno2 = blk2->blkno;
 		} else {
+			/*
+			 * On a double leaf split, the original attr location
+			 * is already stored in blkno2/index2, so don't
+			 * overwrite it overwise we corrupt the tree.
+			 */
 			blk2->index = blk1->index
 					- be16_to_cpu(leaf1->hdr.count);
-			args->index = args->index2 = blk2->index;
-			args->blkno = args->blkno2 = blk2->blkno;
+			args->index = blk2->index;
+			args->blkno = blk2->blkno;
+			if (!state->extravalid) {
+				/*
+				 * set the new attr location to match the old
+				 * one and let the higher level split code
+				 * decide where in the leaf to place it.
+				 */
+				args->index2 = blk2->index;
+				args->blkno2 = blk2->blkno;
+			}
 		}
 	} else {
 		ASSERT(state->inleaf == 1);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 933b7930b863..4b0b8dd1b7b0 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1197,9 +1197,14 @@ xfs_buf_bio_end_io(
 {
 	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
 
-	xfs_buf_ioerror(bp, -error);
+	/*
+	 * don't overwrite existing errors - otherwise we can lose errors on
+	 * buffers that require multiple bios to complete.
+	 */
+	if (!bp->b_error)
+		xfs_buf_ioerror(bp, -error);
 
-	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
 	_xfs_buf_ioend(bp, 1);
@@ -1279,6 +1284,11 @@ next_chunk:
 		if (size)
 			goto next_chunk;
 	} else {
+		/*
+		 * This is guaranteed not to be the last io reference count
+		 * because the caller (xfs_buf_iorequest) holds a count itself.
+		 */
+		atomic_dec(&bp->b_io_remaining);
 		xfs_buf_ioerror(bp, EIO);
 		bio_put(bio);
 	}
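Illustrative sketch (editorial, not from this commit): a buffer built from several bios must keep the first error it sees, so the completion handler now tests b_error before overwriting it and bases the vmap invalidation on the accumulated state rather than on the current bio alone. The rule in isolation, with hypothetical types:

struct buf {
	int b_error;		/* 0 while no completion has failed */
};

void buf_ioerror(struct buf *bp, int error)
{
	bp->b_error = error;
}

void bio_end_io(struct buf *bp, int bio_error)
{
	/* don't overwrite an error recorded by an earlier bio of the same buffer */
	if (!bp->b_error)
		buf_ioerror(bp, -bio_error);

	/* later decisions look at bp->b_error, not at this bio's status */
}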