author    Paul Mackerras <paulus@samba.org>    2005-09-25 08:51:50 -0400
committer Paul Mackerras <paulus@samba.org>    2005-09-25 08:51:50 -0400
commit    e5baa396af7560382d2cf3f0871d616b61fc284c (patch)
tree      6afc166894b8c8b3b2cf6add72a726be14ae2443 /fs
parent    d6a4c847e43c851cc0ddf73087a730227223f989 (diff)
parent    ef6bd6eb90ad72ee8ee7ba8b271f27102e9a90c1 (diff)

Merge from Linus' tree.
Diffstat (limited to 'fs')

-rw-r--r--  fs/9p/conv.c         | 157
-rw-r--r--  fs/9p/v9fs.c         |   8
-rw-r--r--  fs/9p/vfs_inode.c    |   4
-rw-r--r--  fs/9p/vfs_super.c    |  24
-rw-r--r--  fs/cifs/cifsfs.c     |   2
-rw-r--r--  fs/cifs/connect.c    |   2
-rw-r--r--  fs/dcache.c          |   3
-rw-r--r--  fs/ext3/balloc.c     |   6
-rw-r--r--  fs/ext3/resize.c     |   6
-rw-r--r--  fs/ext3/super.c      |  11
-rw-r--r--  fs/fat/inode.c       |  11
-rw-r--r--  fs/jfs/inode.c       |   3
-rw-r--r--  fs/jfs/jfs_dmap.c    |   2
-rw-r--r--  fs/jfs/jfs_txnmgr.c  |  15
-rw-r--r--  fs/jfs/jfs_txnmgr.h  |   1
-rw-r--r--  fs/nfs/read.c        |   5
-rw-r--r--  fs/ntfs/ChangeLog    |   2
-rw-r--r--  fs/ntfs/aops.c       | 122
-rw-r--r--  fs/ntfs/inode.c      |   9
-rw-r--r--  fs/ntfs/malloc.h     |   2
-rw-r--r--  fs/ntfs/runlist.c    | 169
-rw-r--r--  fs/proc/base.c       |  86

22 files changed, 404 insertions, 246 deletions
diff --git a/fs/9p/conv.c b/fs/9p/conv.c
index 1554731bd653..18121af99d3e 100644
--- a/fs/9p/conv.c
+++ b/fs/9p/conv.c
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * 9P protocol conversion functions | 4 | * 9P protocol conversion functions |
5 | * | 5 | * |
6 | * Copyright (C) 2004, 2005 by Latchesar Ionkov <lucho@ionkov.net> | ||
6 | * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> | 7 | * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> |
7 | * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> | 8 | * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> |
8 | * | 9 | * |
@@ -55,66 +56,70 @@ static inline int buf_check_overflow(struct cbuf *buf) | |||
55 | return buf->p > buf->ep; | 56 | return buf->p > buf->ep; |
56 | } | 57 | } |
57 | 58 | ||
58 | static inline void buf_check_size(struct cbuf *buf, int len) | 59 | static inline int buf_check_size(struct cbuf *buf, int len) |
59 | { | 60 | { |
60 | if (buf->p+len > buf->ep) { | 61 | if (buf->p+len > buf->ep) { |
61 | if (buf->p < buf->ep) { | 62 | if (buf->p < buf->ep) { |
62 | eprintk(KERN_ERR, "buffer overflow\n"); | 63 | eprintk(KERN_ERR, "buffer overflow\n"); |
63 | buf->p = buf->ep + 1; | 64 | buf->p = buf->ep + 1; |
65 | return 0; | ||
64 | } | 66 | } |
65 | } | 67 | } |
68 | |||
69 | return 1; | ||
66 | } | 70 | } |
67 | 71 | ||
68 | static inline void *buf_alloc(struct cbuf *buf, int len) | 72 | static inline void *buf_alloc(struct cbuf *buf, int len) |
69 | { | 73 | { |
70 | void *ret = NULL; | 74 | void *ret = NULL; |
71 | 75 | ||
72 | buf_check_size(buf, len); | 76 | if (buf_check_size(buf, len)) { |
73 | ret = buf->p; | 77 | ret = buf->p; |
74 | buf->p += len; | 78 | buf->p += len; |
79 | } | ||
75 | 80 | ||
76 | return ret; | 81 | return ret; |
77 | } | 82 | } |
78 | 83 | ||
79 | static inline void buf_put_int8(struct cbuf *buf, u8 val) | 84 | static inline void buf_put_int8(struct cbuf *buf, u8 val) |
80 | { | 85 | { |
81 | buf_check_size(buf, 1); | 86 | if (buf_check_size(buf, 1)) { |
82 | 87 | buf->p[0] = val; | |
83 | buf->p[0] = val; | 88 | buf->p++; |
84 | buf->p++; | 89 | } |
85 | } | 90 | } |
86 | 91 | ||
87 | static inline void buf_put_int16(struct cbuf *buf, u16 val) | 92 | static inline void buf_put_int16(struct cbuf *buf, u16 val) |
88 | { | 93 | { |
89 | buf_check_size(buf, 2); | 94 | if (buf_check_size(buf, 2)) { |
90 | 95 | *(__le16 *) buf->p = cpu_to_le16(val); | |
91 | *(__le16 *) buf->p = cpu_to_le16(val); | 96 | buf->p += 2; |
92 | buf->p += 2; | 97 | } |
93 | } | 98 | } |
94 | 99 | ||
95 | static inline void buf_put_int32(struct cbuf *buf, u32 val) | 100 | static inline void buf_put_int32(struct cbuf *buf, u32 val) |
96 | { | 101 | { |
97 | buf_check_size(buf, 4); | 102 | if (buf_check_size(buf, 4)) { |
98 | 103 | *(__le32 *)buf->p = cpu_to_le32(val); | |
99 | *(__le32 *)buf->p = cpu_to_le32(val); | 104 | buf->p += 4; |
100 | buf->p += 4; | 105 | } |
101 | } | 106 | } |
102 | 107 | ||
103 | static inline void buf_put_int64(struct cbuf *buf, u64 val) | 108 | static inline void buf_put_int64(struct cbuf *buf, u64 val) |
104 | { | 109 | { |
105 | buf_check_size(buf, 8); | 110 | if (buf_check_size(buf, 8)) { |
106 | 111 | *(__le64 *)buf->p = cpu_to_le64(val); | |
107 | *(__le64 *)buf->p = cpu_to_le64(val); | 112 | buf->p += 8; |
108 | buf->p += 8; | 113 | } |
109 | } | 114 | } |
110 | 115 | ||
111 | static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) | 116 | static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) |
112 | { | 117 | { |
113 | buf_check_size(buf, slen + 2); | 118 | if (buf_check_size(buf, slen + 2)) { |
114 | 119 | buf_put_int16(buf, slen); | |
115 | buf_put_int16(buf, slen); | 120 | memcpy(buf->p, s, slen); |
116 | memcpy(buf->p, s, slen); | 121 | buf->p += slen; |
117 | buf->p += slen; | 122 | } |
118 | } | 123 | } |
119 | 124 | ||
120 | static inline void buf_put_string(struct cbuf *buf, const char *s) | 125 | static inline void buf_put_string(struct cbuf *buf, const char *s) |
@@ -124,20 +129,20 @@ static inline void buf_put_string(struct cbuf *buf, const char *s) | |||
124 | 129 | ||
125 | static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen) | 130 | static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen) |
126 | { | 131 | { |
127 | buf_check_size(buf, datalen); | 132 | if (buf_check_size(buf, datalen)) { |
128 | 133 | memcpy(buf->p, data, datalen); | |
129 | memcpy(buf->p, data, datalen); | 134 | buf->p += datalen; |
130 | buf->p += datalen; | 135 | } |
131 | } | 136 | } |
132 | 137 | ||
133 | static inline u8 buf_get_int8(struct cbuf *buf) | 138 | static inline u8 buf_get_int8(struct cbuf *buf) |
134 | { | 139 | { |
135 | u8 ret = 0; | 140 | u8 ret = 0; |
136 | 141 | ||
137 | buf_check_size(buf, 1); | 142 | if (buf_check_size(buf, 1)) { |
138 | ret = buf->p[0]; | 143 | ret = buf->p[0]; |
139 | 144 | buf->p++; | |
140 | buf->p++; | 145 | } |
141 | 146 | ||
142 | return ret; | 147 | return ret; |
143 | } | 148 | } |
@@ -146,10 +151,10 @@ static inline u16 buf_get_int16(struct cbuf *buf) | |||
146 | { | 151 | { |
147 | u16 ret = 0; | 152 | u16 ret = 0; |
148 | 153 | ||
149 | buf_check_size(buf, 2); | 154 | if (buf_check_size(buf, 2)) { |
150 | ret = le16_to_cpu(*(__le16 *)buf->p); | 155 | ret = le16_to_cpu(*(__le16 *)buf->p); |
151 | 156 | buf->p += 2; | |
152 | buf->p += 2; | 157 | } |
153 | 158 | ||
154 | return ret; | 159 | return ret; |
155 | } | 160 | } |
@@ -158,10 +163,10 @@ static inline u32 buf_get_int32(struct cbuf *buf) | |||
158 | { | 163 | { |
159 | u32 ret = 0; | 164 | u32 ret = 0; |
160 | 165 | ||
161 | buf_check_size(buf, 4); | 166 | if (buf_check_size(buf, 4)) { |
162 | ret = le32_to_cpu(*(__le32 *)buf->p); | 167 | ret = le32_to_cpu(*(__le32 *)buf->p); |
163 | 168 | buf->p += 4; | |
164 | buf->p += 4; | 169 | } |
165 | 170 | ||
166 | return ret; | 171 | return ret; |
167 | } | 172 | } |
@@ -170,10 +175,10 @@ static inline u64 buf_get_int64(struct cbuf *buf) | |||
170 | { | 175 | { |
171 | u64 ret = 0; | 176 | u64 ret = 0; |
172 | 177 | ||
173 | buf_check_size(buf, 8); | 178 | if (buf_check_size(buf, 8)) { |
174 | ret = le64_to_cpu(*(__le64 *)buf->p); | 179 | ret = le64_to_cpu(*(__le64 *)buf->p); |
175 | 180 | buf->p += 8; | |
176 | buf->p += 8; | 181 | } |
177 | 182 | ||
178 | return ret; | 183 | return ret; |
179 | } | 184 | } |
@@ -181,27 +186,35 @@ static inline u64 buf_get_int64(struct cbuf *buf) | |||
181 | static inline int | 186 | static inline int |
182 | buf_get_string(struct cbuf *buf, char *data, unsigned int datalen) | 187 | buf_get_string(struct cbuf *buf, char *data, unsigned int datalen) |
183 | { | 188 | { |
189 | u16 len = 0; | ||
190 | |||
191 | len = buf_get_int16(buf); | ||
192 | if (!buf_check_overflow(buf) && buf_check_size(buf, len) && len+1>datalen) { | ||
193 | memcpy(data, buf->p, len); | ||
194 | data[len] = 0; | ||
195 | buf->p += len; | ||
196 | len++; | ||
197 | } | ||
184 | 198 | ||
185 | u16 len = buf_get_int16(buf); | 199 | return len; |
186 | buf_check_size(buf, len); | ||
187 | if (len + 1 > datalen) | ||
188 | return 0; | ||
189 | |||
190 | memcpy(data, buf->p, len); | ||
191 | data[len] = 0; | ||
192 | buf->p += len; | ||
193 | |||
194 | return len + 1; | ||
195 | } | 200 | } |
196 | 201 | ||
197 | static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) | 202 | static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) |
198 | { | 203 | { |
199 | char *ret = NULL; | 204 | char *ret; |
200 | int n = buf_get_string(buf, sbuf->p, sbuf->ep - sbuf->p); | 205 | u16 len; |
206 | |||
207 | ret = NULL; | ||
208 | len = buf_get_int16(buf); | ||
201 | 209 | ||
202 | if (n > 0) { | 210 | if (!buf_check_overflow(buf) && buf_check_size(buf, len) && |
211 | buf_check_size(sbuf, len+1)) { | ||
212 | |||
213 | memcpy(sbuf->p, buf->p, len); | ||
214 | sbuf->p[len] = 0; | ||
203 | ret = sbuf->p; | 215 | ret = sbuf->p; |
204 | sbuf->p += n; | 216 | buf->p += len; |
217 | sbuf->p += len + 1; | ||
205 | } | 218 | } |
206 | 219 | ||
207 | return ret; | 220 | return ret; |
@@ -209,12 +222,15 @@ static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) | |||
209 | 222 | ||
210 | static inline int buf_get_data(struct cbuf *buf, void *data, int datalen) | 223 | static inline int buf_get_data(struct cbuf *buf, void *data, int datalen) |
211 | { | 224 | { |
212 | buf_check_size(buf, datalen); | 225 | int ret = 0; |
213 | 226 | ||
214 | memcpy(data, buf->p, datalen); | 227 | if (buf_check_size(buf, datalen)) { |
215 | buf->p += datalen; | 228 | memcpy(data, buf->p, datalen); |
229 | buf->p += datalen; | ||
230 | ret = datalen; | ||
231 | } | ||
216 | 232 | ||
217 | return datalen; | 233 | return ret; |
218 | } | 234 | } |
219 | 235 | ||
220 | static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, | 236 | static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, |
@@ -223,13 +239,12 @@ static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, | |||
223 | char *ret = NULL; | 239 | char *ret = NULL; |
224 | int n = 0; | 240 | int n = 0; |
225 | 241 | ||
226 | buf_check_size(dbuf, datalen); | 242 | if (buf_check_size(dbuf, datalen)) { |
227 | 243 | n = buf_get_data(buf, dbuf->p, datalen); | |
228 | n = buf_get_data(buf, dbuf->p, datalen); | 244 | if (n > 0) { |
229 | 245 | ret = dbuf->p; | |
230 | if (n > 0) { | 246 | dbuf->p += n; |
231 | ret = dbuf->p; | 247 | } |
232 | dbuf->p += n; | ||
233 | } | 248 | } |
234 | 249 | ||
235 | return ret; | 250 | return ret; |
@@ -636,7 +651,7 @@ v9fs_deserialize_fcall(struct v9fs_session_info *v9ses, u32 msgsize, | |||
636 | break; | 651 | break; |
637 | case RWALK: | 652 | case RWALK: |
638 | rcall->params.rwalk.nwqid = buf_get_int16(bufp); | 653 | rcall->params.rwalk.nwqid = buf_get_int16(bufp); |
639 | rcall->params.rwalk.wqids = buf_alloc(bufp, | 654 | rcall->params.rwalk.wqids = buf_alloc(dbufp, |
640 | rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid)); | 655 | rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid)); |
641 | if (rcall->params.rwalk.wqids) | 656 | if (rcall->params.rwalk.wqids) |
642 | for (i = 0; i < rcall->params.rwalk.nwqid; i++) { | 657 | for (i = 0; i < rcall->params.rwalk.nwqid; i++) { |
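
The conv.c changes above convert buf_check_size() from a void helper into one that reports whether the requested bytes fit, and every put/get helper now only writes and advances the cursor when the check succeeds. A minimal user-space sketch of that pattern (simplified; illustrative names, not the kernel helpers themselves):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cbuf {
	unsigned char *p;	/* current position */
	unsigned char *ep;	/* one past the end of the buffer */
};

/* Return 1 if len more bytes fit, 0 otherwise (simplified check). */
static int buf_check_size(struct cbuf *buf, int len)
{
	if (buf->p + len > buf->ep) {
		fprintf(stderr, "buffer overflow avoided\n");
		return 0;
	}
	return 1;
}

/* Only write and advance when the size check succeeds. */
static void buf_put_u16(struct cbuf *buf, uint16_t val)
{
	if (buf_check_size(buf, 2)) {
		memcpy(buf->p, &val, 2);
		buf->p += 2;
	}
}

int main(void)
{
	unsigned char raw[3];
	struct cbuf buf = { raw, raw + sizeof(raw) };

	buf_put_u16(&buf, 0x0102);	/* fits */
	buf_put_u16(&buf, 0x0304);	/* rejected: only one byte left */
	return 0;
}
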
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 13bdbbab4387..82303f3bf76f 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -303,7 +303,13 @@ v9fs_session_init(struct v9fs_session_info *v9ses, | |||
303 | goto SessCleanUp; | 303 | goto SessCleanUp; |
304 | }; | 304 | }; |
305 | 305 | ||
306 | v9ses->transport = trans_proto; | 306 | v9ses->transport = kmalloc(sizeof(*v9ses->transport), GFP_KERNEL); |
307 | if (!v9ses->transport) { | ||
308 | retval = -ENOMEM; | ||
309 | goto SessCleanUp; | ||
310 | } | ||
311 | |||
312 | memmove(v9ses->transport, trans_proto, sizeof(*v9ses->transport)); | ||
307 | 313 | ||
308 | if ((retval = v9ses->transport->init(v9ses, dev_name, data)) < 0) { | 314 | if ((retval = v9ses->transport->init(v9ses, dev_name, data)) < 0) { |
309 | eprintk(KERN_ERR, "problem initializing transport\n"); | 315 | eprintk(KERN_ERR, "problem initializing transport\n"); |
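
The v9fs.c hunk stops pointing every session at the shared trans_proto template and instead gives each session its own heap copy, so per-connection transport state is never shared between mounts. A rough user-space sketch of the same copy-the-template idiom (hypothetical types, not the v9fs structures):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct transport {
	int (*init)(void *session, const char *dev, void *opts);
	void *priv;	/* per-connection state lives here */
};

struct session {
	struct transport *transport;
};

/* Duplicate the read-only template so each session owns its transport. */
static int session_set_transport(struct session *s, const struct transport *tmpl)
{
	s->transport = malloc(sizeof(*s->transport));
	if (!s->transport)
		return -ENOMEM;
	memcpy(s->transport, tmpl, sizeof(*s->transport));
	return 0;
}
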
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 0c13fc600049..b16322db5ce6 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1063,8 +1063,8 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer, | |||
1063 | int ret; | 1063 | int ret; |
1064 | char *link = __getname(); | 1064 | char *link = __getname(); |
1065 | 1065 | ||
1066 | if (strlen(link) < buflen) | 1066 | if (buflen > PATH_MAX) |
1067 | buflen = strlen(link); | 1067 | buflen = PATH_MAX; |
1068 | 1068 | ||
1069 | dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); | 1069 | dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); |
1070 | 1070 | ||
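
The readlink fix replaces a clamp based on strlen() of the freshly allocated (and therefore uninitialized) __getname() buffer with a clamp to PATH_MAX. The general shape of the corrected bound (a sketch, not the kernel function):

#include <stddef.h>

#ifndef PATH_MAX
#define PATH_MAX 4096	/* illustrative fallback */
#endif

/* Never size the copy from the contents of an uninitialized buffer;
 * bound it by the buffer's known capacity instead. */
static size_t clamp_link_buflen(size_t buflen)
{
	return buflen > PATH_MAX ? PATH_MAX : buflen;
}
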
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 868f350b2c5f..1e2b2b54d300 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -129,8 +129,8 @@ static struct super_block *v9fs_get_sb(struct file_system_type | |||
129 | 129 | ||
130 | if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) { | 130 | if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) { |
131 | dprintk(DEBUG_ERROR, "problem initiating session\n"); | 131 | dprintk(DEBUG_ERROR, "problem initiating session\n"); |
132 | retval = newfid; | 132 | kfree(v9ses); |
133 | goto free_session; | 133 | return ERR_PTR(newfid); |
134 | } | 134 | } |
135 | 135 | ||
136 | sb = sget(fs_type, NULL, v9fs_set_super, v9ses); | 136 | sb = sget(fs_type, NULL, v9fs_set_super, v9ses); |
@@ -150,7 +150,7 @@ static struct super_block *v9fs_get_sb(struct file_system_type | |||
150 | 150 | ||
151 | if (!root) { | 151 | if (!root) { |
152 | retval = -ENOMEM; | 152 | retval = -ENOMEM; |
153 | goto release_inode; | 153 | goto put_back_sb; |
154 | } | 154 | } |
155 | 155 | ||
156 | sb->s_root = root; | 156 | sb->s_root = root; |
@@ -159,7 +159,7 @@ static struct super_block *v9fs_get_sb(struct file_system_type | |||
159 | root_fid = v9fs_fid_create(root); | 159 | root_fid = v9fs_fid_create(root); |
160 | if (root_fid == NULL) { | 160 | if (root_fid == NULL) { |
161 | retval = -ENOMEM; | 161 | retval = -ENOMEM; |
162 | goto release_dentry; | 162 | goto put_back_sb; |
163 | } | 163 | } |
164 | 164 | ||
165 | root_fid->fidopen = 0; | 165 | root_fid->fidopen = 0; |
@@ -182,25 +182,15 @@ static struct super_block *v9fs_get_sb(struct file_system_type | |||
182 | 182 | ||
183 | if (stat_result < 0) { | 183 | if (stat_result < 0) { |
184 | retval = stat_result; | 184 | retval = stat_result; |
185 | goto release_dentry; | 185 | goto put_back_sb; |
186 | } | 186 | } |
187 | 187 | ||
188 | return sb; | 188 | return sb; |
189 | 189 | ||
190 | release_dentry: | 190 | put_back_sb: |
191 | dput(sb->s_root); | 191 | /* deactivate_super calls v9fs_kill_super which will frees the rest */ |
192 | |||
193 | release_inode: | ||
194 | iput(inode); | ||
195 | |||
196 | put_back_sb: | ||
197 | up_write(&sb->s_umount); | 192 | up_write(&sb->s_umount); |
198 | deactivate_super(sb); | 193 | deactivate_super(sb); |
199 | v9fs_session_close(v9ses); | ||
200 | |||
201 | free_session: | ||
202 | kfree(v9ses); | ||
203 | |||
204 | return ERR_PTR(retval); | 194 | return ERR_PTR(retval); |
205 | } | 195 | } |
206 | 196 | ||
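
The vfs_super.c error paths collapse into a single put_back_sb unwind once sget() has succeeded: from that point the superblock owns the session, so deactivate_super() (via the filesystem's kill_sb, as the new comment says) releases everything, and freeing pieces by hand would risk a double free. A generic user-space sketch of that ownership-transfer pattern (hypothetical names):

#include <stdlib.h>

struct conn { int fd; };

struct server {
	struct conn *conn;
};

/* Tears down everything the server owns, including its connection. */
static void server_destroy(struct server *srv)
{
	free(srv->conn);
	free(srv);
}

static int server_configure(struct server *srv)
{
	(void)srv;
	return -1;	/* pretend a later setup step failed */
}

static struct server *server_create(struct conn *conn)
{
	struct server *srv = calloc(1, sizeof(*srv));
	if (!srv) {
		free(conn);		/* nothing owns the connection yet */
		return NULL;
	}
	srv->conn = conn;		/* ownership transferred */

	if (server_configure(srv) < 0) {
		server_destroy(srv);	/* one destructor; never also free(conn) here */
		return NULL;
	}
	return srv;
}
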
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8cc23e7d0d5d..1ebf7dafc1d7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -781,6 +781,8 @@ static int cifs_oplock_thread(void * dummyarg) | |||
781 | 781 | ||
782 | oplockThread = current; | 782 | oplockThread = current; |
783 | do { | 783 | do { |
784 | if (try_to_freeze()) | ||
785 | continue; | ||
784 | set_current_state(TASK_INTERRUPTIBLE); | 786 | set_current_state(TASK_INTERRUPTIBLE); |
785 | 787 | ||
786 | schedule_timeout(1*HZ); | 788 | schedule_timeout(1*HZ); |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2335f14a1583..47360156cc54 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -344,6 +344,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
344 | } | 344 | } |
345 | 345 | ||
346 | while (server->tcpStatus != CifsExiting) { | 346 | while (server->tcpStatus != CifsExiting) { |
347 | if (try_to_freeze()) | ||
348 | continue; | ||
347 | if (bigbuf == NULL) { | 349 | if (bigbuf == NULL) { |
348 | bigbuf = cifs_buf_get(); | 350 | bigbuf = cifs_buf_get(); |
349 | if(bigbuf == NULL) { | 351 | if(bigbuf == NULL) { |
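
Both cifs kernel threads gain a try_to_freeze() call at the top of their service loops so they park themselves during suspend instead of blocking the freezer. A schematic kernel-thread loop showing where the call sits (kernel-only sketch; the header locations are the modern ones and the loop is not the cifs code verbatim):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int service_thread(void *unused)
{
	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;	/* just thawed: re-check state first */

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);

		/* ... service one request here ... */
	}
	return 0;
}
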
diff --git a/fs/dcache.c b/fs/dcache.c
index 7376b61269fb..fb10386c59be 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -102,7 +102,8 @@ static inline void dentry_iput(struct dentry * dentry) | |||
102 | list_del_init(&dentry->d_alias); | 102 | list_del_init(&dentry->d_alias); |
103 | spin_unlock(&dentry->d_lock); | 103 | spin_unlock(&dentry->d_lock); |
104 | spin_unlock(&dcache_lock); | 104 | spin_unlock(&dcache_lock); |
105 | fsnotify_inoderemove(inode); | 105 | if (!inode->i_nlink) |
106 | fsnotify_inoderemove(inode); | ||
106 | if (dentry->d_op && dentry->d_op->d_iput) | 107 | if (dentry->d_op && dentry->d_op->d_iput) |
107 | dentry->d_op->d_iput(dentry, inode); | 108 | dentry->d_op->d_iput(dentry, inode); |
108 | else | 109 | else |
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index e463dca008e4..0213db4911a2 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1410,7 +1410,7 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) | |||
1410 | unsigned long desc_count; | 1410 | unsigned long desc_count; |
1411 | struct ext3_group_desc *gdp; | 1411 | struct ext3_group_desc *gdp; |
1412 | int i; | 1412 | int i; |
1413 | unsigned long ngroups; | 1413 | unsigned long ngroups = EXT3_SB(sb)->s_groups_count; |
1414 | #ifdef EXT3FS_DEBUG | 1414 | #ifdef EXT3FS_DEBUG |
1415 | struct ext3_super_block *es; | 1415 | struct ext3_super_block *es; |
1416 | unsigned long bitmap_count, x; | 1416 | unsigned long bitmap_count, x; |
@@ -1421,7 +1421,8 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) | |||
1421 | desc_count = 0; | 1421 | desc_count = 0; |
1422 | bitmap_count = 0; | 1422 | bitmap_count = 0; |
1423 | gdp = NULL; | 1423 | gdp = NULL; |
1424 | for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { | 1424 | |
1425 | for (i = 0; i < ngroups; i++) { | ||
1425 | gdp = ext3_get_group_desc(sb, i, NULL); | 1426 | gdp = ext3_get_group_desc(sb, i, NULL); |
1426 | if (!gdp) | 1427 | if (!gdp) |
1427 | continue; | 1428 | continue; |
@@ -1443,7 +1444,6 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) | |||
1443 | return bitmap_count; | 1444 | return bitmap_count; |
1444 | #else | 1445 | #else |
1445 | desc_count = 0; | 1446 | desc_count = 0; |
1446 | ngroups = EXT3_SB(sb)->s_groups_count; | ||
1447 | smp_rmb(); | 1447 | smp_rmb(); |
1448 | for (i = 0; i < ngroups; i++) { | 1448 | for (i = 0; i < ngroups; i++) { |
1449 | gdp = ext3_get_group_desc(sb, i, NULL); | 1449 | gdp = ext3_get_group_desc(sb, i, NULL); |
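
ext3_count_free_blocks() now samples s_groups_count once into a local before walking the group descriptors, so a concurrent online resize cannot move the loop bound mid-walk (the non-debug path pairs the read with smp_rmb()). A user-space analogue of snapshotting a shared counter, with C11 atomics standing in for the kernel barrier:

#include <stdatomic.h>

extern _Atomic unsigned long shared_ngroups;	/* bumped by a resizer thread */

unsigned long count_free(const unsigned long free_per_group[])
{
	/* One consistent bound for the whole walk. */
	unsigned long ngroups = atomic_load_explicit(&shared_ngroups,
						     memory_order_acquire);
	unsigned long total = 0;

	for (unsigned long i = 0; i < ngroups; i++)
		total += free_per_group[i];
	return total;
}
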
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 2c9f81278d5d..57f79106267d 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -242,7 +242,7 @@ static int setup_new_group_blocks(struct super_block *sb, | |||
242 | i < sbi->s_itb_per_group; i++, bit++, block++) { | 242 | i < sbi->s_itb_per_group; i++, bit++, block++) { |
243 | struct buffer_head *it; | 243 | struct buffer_head *it; |
244 | 244 | ||
245 | ext3_debug("clear inode block %#04x (+%ld)\n", block, bit); | 245 | ext3_debug("clear inode block %#04lx (+%d)\n", block, bit); |
246 | if (IS_ERR(it = bclean(handle, sb, block))) { | 246 | if (IS_ERR(it = bclean(handle, sb, block))) { |
247 | err = PTR_ERR(it); | 247 | err = PTR_ERR(it); |
248 | goto exit_bh; | 248 | goto exit_bh; |
@@ -643,8 +643,8 @@ static void update_backups(struct super_block *sb, | |||
643 | break; | 643 | break; |
644 | 644 | ||
645 | bh = sb_getblk(sb, group * bpg + blk_off); | 645 | bh = sb_getblk(sb, group * bpg + blk_off); |
646 | ext3_debug(sb, __FUNCTION__, "update metadata backup %#04lx\n", | 646 | ext3_debug("update metadata backup %#04lx\n", |
647 | bh->b_blocknr); | 647 | (unsigned long)bh->b_blocknr); |
648 | if ((err = ext3_journal_get_write_access(handle, bh))) | 648 | if ((err = ext3_journal_get_write_access(handle, bh))) |
649 | break; | 649 | break; |
650 | lock_buffer(bh); | 650 | lock_buffer(bh); |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index a93c3609025d..9e24ceb019fe 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -512,15 +512,14 @@ static void ext3_clear_inode(struct inode *inode) | |||
512 | 512 | ||
513 | static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs) | 513 | static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs) |
514 | { | 514 | { |
515 | struct ext3_sb_info *sbi = EXT3_SB(vfs->mnt_sb); | 515 | struct super_block *sb = vfs->mnt_sb; |
516 | struct ext3_sb_info *sbi = EXT3_SB(sb); | ||
516 | 517 | ||
517 | if (sbi->s_mount_opt & EXT3_MOUNT_JOURNAL_DATA) | 518 | if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA) |
518 | seq_puts(seq, ",data=journal"); | 519 | seq_puts(seq, ",data=journal"); |
519 | 520 | else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA) | |
520 | if (sbi->s_mount_opt & EXT3_MOUNT_ORDERED_DATA) | ||
521 | seq_puts(seq, ",data=ordered"); | 521 | seq_puts(seq, ",data=ordered"); |
522 | 522 | else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA) | |
523 | if (sbi->s_mount_opt & EXT3_MOUNT_WRITEBACK_DATA) | ||
524 | seq_puts(seq, ",data=writeback"); | 523 | seq_puts(seq, ",data=writeback"); |
525 | 524 | ||
526 | #if defined(CONFIG_QUOTA) | 525 | #if defined(CONFIG_QUOTA) |
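
ext3_show_options() now treats the data= journaling mode as a single multi-bit field compared with test_opt(sb, DATA_FLAGS) == mode, rather than testing each mode bit separately: the three modes share one field, so per-bit tests could print more than one data= option. A small sketch of the masked-field comparison (flag values are illustrative, not copied from the ext3 headers):

#include <stdio.h>

#define MOUNT_DATA_JOURNAL	0x0400	/* illustrative values */
#define MOUNT_DATA_ORDERED	0x0800
#define MOUNT_DATA_WRITEBACK	0x0c00	/* overlaps both bits above */
#define MOUNT_DATA_MASK		0x0c00

static void show_data_mode(unsigned int mount_opt)
{
	/* Compare the whole masked field so exactly one mode is reported. */
	switch (mount_opt & MOUNT_DATA_MASK) {
	case MOUNT_DATA_JOURNAL:
		fputs(",data=journal", stdout);
		break;
	case MOUNT_DATA_ORDERED:
		fputs(",data=ordered", stdout);
		break;
	case MOUNT_DATA_WRITEBACK:
		fputs(",data=writeback", stdout);
		break;
	}
}
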
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 51b1d15d9d5c..e2effe2dc9b2 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -300,9 +300,9 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de) | |||
300 | inode->i_blksize = sbi->cluster_size; | 300 | inode->i_blksize = sbi->cluster_size; |
301 | inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) | 301 | inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) |
302 | & ~((loff_t)sbi->cluster_size - 1)) >> 9; | 302 | & ~((loff_t)sbi->cluster_size - 1)) >> 9; |
303 | inode->i_mtime.tv_sec = inode->i_atime.tv_sec = | 303 | inode->i_mtime.tv_sec = |
304 | date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date)); | 304 | date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date)); |
305 | inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = 0; | 305 | inode->i_mtime.tv_nsec = 0; |
306 | if (sbi->options.isvfat) { | 306 | if (sbi->options.isvfat) { |
307 | int secs = de->ctime_cs / 100; | 307 | int secs = de->ctime_cs / 100; |
308 | int csecs = de->ctime_cs % 100; | 308 | int csecs = de->ctime_cs % 100; |
@@ -310,8 +310,11 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de) | |||
310 | date_dos2unix(le16_to_cpu(de->ctime), | 310 | date_dos2unix(le16_to_cpu(de->ctime), |
311 | le16_to_cpu(de->cdate)) + secs; | 311 | le16_to_cpu(de->cdate)) + secs; |
312 | inode->i_ctime.tv_nsec = csecs * 10000000; | 312 | inode->i_ctime.tv_nsec = csecs * 10000000; |
313 | inode->i_atime.tv_sec = | ||
314 | date_dos2unix(le16_to_cpu(0), le16_to_cpu(de->adate)); | ||
315 | inode->i_atime.tv_nsec = 0; | ||
313 | } else | 316 | } else |
314 | inode->i_ctime = inode->i_mtime; | 317 | inode->i_ctime = inode->i_atime = inode->i_mtime; |
315 | 318 | ||
316 | return 0; | 319 | return 0; |
317 | } | 320 | } |
@@ -513,7 +516,9 @@ retry: | |||
513 | raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16); | 516 | raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16); |
514 | fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time, &raw_entry->date); | 517 | fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time, &raw_entry->date); |
515 | if (sbi->options.isvfat) { | 518 | if (sbi->options.isvfat) { |
519 | __le16 atime; | ||
516 | fat_date_unix2dos(inode->i_ctime.tv_sec,&raw_entry->ctime,&raw_entry->cdate); | 520 | fat_date_unix2dos(inode->i_ctime.tv_sec,&raw_entry->ctime,&raw_entry->cdate); |
521 | fat_date_unix2dos(inode->i_atime.tv_sec,&atime,&raw_entry->adate); | ||
517 | raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 + | 522 | raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 + |
518 | inode->i_ctime.tv_nsec / 10000000; | 523 | inode->i_ctime.tv_nsec / 10000000; |
519 | } | 524 | } |
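
The FAT change starts round-tripping the on-disk access date: fat_fill_inode() now derives i_atime from de->adate, and the write-back path converts i_atime back with fat_date_unix2dos(). For reference, a stand-alone sketch of decoding the 16-bit FAT date word (day in bits 0-4, month in bits 5-8, years since 1980 in bits 9-15); the kernel's own conversion is date_dos2unix():

#include <time.h>

static time_t fat_date_to_unix(unsigned int dos_date)
{
	struct tm tm = {
		.tm_mday  = dos_date & 0x1f,
		.tm_mon   = ((dos_date >> 5) & 0x0f) - 1,	/* struct tm months are 0-based */
		.tm_year  = ((dos_date >> 9) & 0x7f) + 80,	/* years since 1900 */
		.tm_isdst = -1,
	};
	return mktime(&tm);	/* midnight; adate carries no time of day */
}
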
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 0ec62d5310db..9f942ca8e4e3 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -129,8 +129,7 @@ void jfs_delete_inode(struct inode *inode) | |||
129 | jfs_info("In jfs_delete_inode, inode = 0x%p", inode); | 129 | jfs_info("In jfs_delete_inode, inode = 0x%p", inode); |
130 | 130 | ||
131 | if (!is_bad_inode(inode) && | 131 | if (!is_bad_inode(inode) && |
132 | (JFS_IP(inode)->fileset == cpu_to_le32(FILESYSTEM_I))) { | 132 | (JFS_IP(inode)->fileset == FILESYSTEM_I)) { |
133 | |||
134 | truncate_inode_pages(&inode->i_data, 0); | 133 | truncate_inode_pages(&inode->i_data, 0); |
135 | 134 | ||
136 | if (test_cflag(COMMIT_Freewmap, inode)) | 135 | if (test_cflag(COMMIT_Freewmap, inode)) |
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index c739626f5bf1..eadf319bee22 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -3055,7 +3055,7 @@ static int cntlz(u32 value) | |||
3055 | * RETURN VALUES: | 3055 | * RETURN VALUES: |
3056 | * log2 number of blocks | 3056 | * log2 number of blocks |
3057 | */ | 3057 | */ |
3058 | int blkstol2(s64 nb) | 3058 | static int blkstol2(s64 nb) |
3059 | { | 3059 | { |
3060 | int l2nb; | 3060 | int l2nb; |
3061 | s64 mask; /* meant to be signed */ | 3061 | s64 mask; /* meant to be signed */ |
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index c7a92f9deb2b..9b71ed2674fe 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -725,6 +725,9 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp, | |||
725 | else | 725 | else |
726 | tlck->flag = tlckINODELOCK; | 726 | tlck->flag = tlckINODELOCK; |
727 | 727 | ||
728 | if (S_ISDIR(ip->i_mode)) | ||
729 | tlck->flag |= tlckDIRECTORY; | ||
730 | |||
728 | tlck->type = 0; | 731 | tlck->type = 0; |
729 | 732 | ||
730 | /* bind the tlock and the page */ | 733 | /* bind the tlock and the page */ |
@@ -1009,6 +1012,8 @@ struct tlock *txMaplock(tid_t tid, struct inode *ip, int type) | |||
1009 | 1012 | ||
1010 | /* bind the tlock and the object */ | 1013 | /* bind the tlock and the object */ |
1011 | tlck->flag = tlckINODELOCK; | 1014 | tlck->flag = tlckINODELOCK; |
1015 | if (S_ISDIR(ip->i_mode)) | ||
1016 | tlck->flag |= tlckDIRECTORY; | ||
1012 | tlck->ip = ip; | 1017 | tlck->ip = ip; |
1013 | tlck->mp = NULL; | 1018 | tlck->mp = NULL; |
1014 | 1019 | ||
@@ -1077,6 +1082,8 @@ struct linelock *txLinelock(struct linelock * tlock) | |||
1077 | linelock->flag = tlckLINELOCK; | 1082 | linelock->flag = tlckLINELOCK; |
1078 | linelock->maxcnt = TLOCKLONG; | 1083 | linelock->maxcnt = TLOCKLONG; |
1079 | linelock->index = 0; | 1084 | linelock->index = 0; |
1085 | if (tlck->flag & tlckDIRECTORY) | ||
1086 | linelock->flag |= tlckDIRECTORY; | ||
1080 | 1087 | ||
1081 | /* append linelock after tlock */ | 1088 | /* append linelock after tlock */ |
1082 | linelock->next = tlock->next; | 1089 | linelock->next = tlock->next; |
@@ -2070,8 +2077,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, | |||
2070 | * | 2077 | * |
2071 | * function: log from maplock of freed data extents; | 2078 | * function: log from maplock of freed data extents; |
2072 | */ | 2079 | */ |
2073 | void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, | 2080 | static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, |
2074 | struct tlock * tlck) | 2081 | struct tlock * tlck) |
2075 | { | 2082 | { |
2076 | struct pxd_lock *pxdlock; | 2083 | struct pxd_lock *pxdlock; |
2077 | int i, nlock; | 2084 | int i, nlock; |
@@ -2209,7 +2216,7 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea) | |||
2209 | * function: synchronously write pages locked by transaction | 2216 | * function: synchronously write pages locked by transaction |
2210 | * after txLog() but before txUpdateMap(); | 2217 | * after txLog() but before txUpdateMap(); |
2211 | */ | 2218 | */ |
2212 | void txForce(struct tblock * tblk) | 2219 | static void txForce(struct tblock * tblk) |
2213 | { | 2220 | { |
2214 | struct tlock *tlck; | 2221 | struct tlock *tlck; |
2215 | lid_t lid, next; | 2222 | lid_t lid, next; |
@@ -2358,7 +2365,7 @@ static void txUpdateMap(struct tblock * tblk) | |||
2358 | */ | 2365 | */ |
2359 | else { /* (maplock->flag & mlckFREE) */ | 2366 | else { /* (maplock->flag & mlckFREE) */ |
2360 | 2367 | ||
2361 | if (S_ISDIR(tlck->ip->i_mode)) | 2368 | if (tlck->flag & tlckDIRECTORY) |
2362 | txFreeMap(ipimap, maplock, | 2369 | txFreeMap(ipimap, maplock, |
2363 | tblk, COMMIT_PWMAP); | 2370 | tblk, COMMIT_PWMAP); |
2364 | else | 2371 | else |
diff --git a/fs/jfs/jfs_txnmgr.h b/fs/jfs/jfs_txnmgr.h
index 59ad0f6b7231..0e4dc4514c47 100644
--- a/fs/jfs/jfs_txnmgr.h
+++ b/fs/jfs/jfs_txnmgr.h
@@ -122,6 +122,7 @@ extern struct tlock *TxLock; /* transaction lock table */ | |||
122 | #define tlckLOG 0x0800 | 122 | #define tlckLOG 0x0800 |
123 | /* updateMap state */ | 123 | /* updateMap state */ |
124 | #define tlckUPDATEMAP 0x0080 | 124 | #define tlckUPDATEMAP 0x0080 |
125 | #define tlckDIRECTORY 0x0040 | ||
125 | /* freeLock state */ | 126 | /* freeLock state */ |
126 | #define tlckFREELOCK 0x0008 | 127 | #define tlckFREELOCK 0x0008 |
127 | #define tlckWRITEPAGE 0x0004 | 128 | #define tlckWRITEPAGE 0x0004 |
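
The JFS hunks record "this transaction lock belongs to a directory" in the new tlckDIRECTORY flag when the tlock or maplock is created, propagate it to linelocks, and let txUpdateMap() test the flag instead of reaching through tlck->ip at commit time. The idea in miniature (hypothetical user-space types):

#include <stdbool.h>
#include <sys/stat.h>

#define TLCK_DIRECTORY	0x0040	/* same role as the new tlckDIRECTORY bit */

struct tlock {
	unsigned int flag;
};

/* Capture the property when the lock is taken... */
static void tlock_init(struct tlock *tlck, const struct stat *st)
{
	tlck->flag = 0;
	if (S_ISDIR(st->st_mode))
		tlck->flag |= TLCK_DIRECTORY;
}

/* ...so commit-time code can test the flag without touching the inode. */
static bool tlock_is_directory(const struct tlock *tlck)
{
	return tlck->flag & TLCK_DIRECTORY;
}
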
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6ceb1d471f20..9758ebd49905 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -184,14 +184,13 @@ static void nfs_readpage_release(struct nfs_page *req) | |||
184 | { | 184 | { |
185 | unlock_page(req->wb_page); | 185 | unlock_page(req->wb_page); |
186 | 186 | ||
187 | nfs_clear_request(req); | ||
188 | nfs_release_request(req); | ||
189 | |||
190 | dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", | 187 | dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", |
191 | req->wb_context->dentry->d_inode->i_sb->s_id, | 188 | req->wb_context->dentry->d_inode->i_sb->s_id, |
192 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 189 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), |
193 | req->wb_bytes, | 190 | req->wb_bytes, |
194 | (long long)req_offset(req)); | 191 | (long long)req_offset(req)); |
192 | nfs_clear_request(req); | ||
193 | nfs_release_request(req); | ||
195 | } | 194 | } |
196 | 195 | ||
197 | /* | 196 | /* |
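
nfs_readpage_release() now logs before dropping its references: the old order printed fields of a request that nfs_release_request() may already have freed. The rule it restores, in a tiny sketch:

#include <stdio.h>
#include <stdlib.h>

struct request {
	unsigned int bytes;
	long long offset;
};

static void request_release(struct request *req)
{
	/* Use the object while it is still guaranteed to be alive... */
	fprintf(stderr, "read done (%u@%lld)\n", req->bytes, req->offset);

	/* ...and only then drop the reference that may free it. */
	free(req);
}
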
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 49eafbdb15c1..c7e9237379c2 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -92,6 +92,8 @@ ToDo/Notes: | |||
92 | an octal number to conform to how chmod(1) works, too. Thanks to | 92 | an octal number to conform to how chmod(1) works, too. Thanks to |
93 | Giuseppe Bilotta and Horst von Brand for pointing out the errors of | 93 | Giuseppe Bilotta and Horst von Brand for pointing out the errors of |
94 | my ways. | 94 | my ways. |
95 | - Fix various bugs in the runlist merging code. (Based on libntfs | ||
96 | changes by Richard Russon.) | ||
95 | 97 | ||
96 | 2.1.23 - Implement extension of resident files and make writing safe as well as | 98 | 2.1.23 - Implement extension of resident files and make writing safe as well as |
97 | many bug fixes, cleanups, and enhancements... | 99 | many bug fixes, cleanups, and enhancements... |
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index b6cc8cf24626..5e80c07c6a4d 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -59,39 +59,49 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
59 | unsigned long flags; | 59 | unsigned long flags; |
60 | struct buffer_head *first, *tmp; | 60 | struct buffer_head *first, *tmp; |
61 | struct page *page; | 61 | struct page *page; |
62 | struct inode *vi; | ||
62 | ntfs_inode *ni; | 63 | ntfs_inode *ni; |
63 | int page_uptodate = 1; | 64 | int page_uptodate = 1; |
64 | 65 | ||
65 | page = bh->b_page; | 66 | page = bh->b_page; |
66 | ni = NTFS_I(page->mapping->host); | 67 | vi = page->mapping->host; |
68 | ni = NTFS_I(vi); | ||
67 | 69 | ||
68 | if (likely(uptodate)) { | 70 | if (likely(uptodate)) { |
69 | s64 file_ofs, initialized_size; | 71 | loff_t i_size; |
72 | s64 file_ofs, init_size; | ||
70 | 73 | ||
71 | set_buffer_uptodate(bh); | 74 | set_buffer_uptodate(bh); |
72 | 75 | ||
73 | file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + | 76 | file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + |
74 | bh_offset(bh); | 77 | bh_offset(bh); |
75 | read_lock_irqsave(&ni->size_lock, flags); | 78 | read_lock_irqsave(&ni->size_lock, flags); |
76 | initialized_size = ni->initialized_size; | 79 | init_size = ni->initialized_size; |
80 | i_size = i_size_read(vi); | ||
77 | read_unlock_irqrestore(&ni->size_lock, flags); | 81 | read_unlock_irqrestore(&ni->size_lock, flags); |
82 | if (unlikely(init_size > i_size)) { | ||
83 | /* Race with shrinking truncate. */ | ||
84 | init_size = i_size; | ||
85 | } | ||
78 | /* Check for the current buffer head overflowing. */ | 86 | /* Check for the current buffer head overflowing. */ |
79 | if (file_ofs + bh->b_size > initialized_size) { | 87 | if (unlikely(file_ofs + bh->b_size > init_size)) { |
80 | char *addr; | 88 | u8 *kaddr; |
81 | int ofs = 0; | 89 | int ofs; |
82 | 90 | ||
83 | if (file_ofs < initialized_size) | 91 | ofs = 0; |
84 | ofs = initialized_size - file_ofs; | 92 | if (file_ofs < init_size) |
85 | addr = kmap_atomic(page, KM_BIO_SRC_IRQ); | 93 | ofs = init_size - file_ofs; |
86 | memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); | 94 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); |
95 | memset(kaddr + bh_offset(bh) + ofs, 0, | ||
96 | bh->b_size - ofs); | ||
97 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); | ||
87 | flush_dcache_page(page); | 98 | flush_dcache_page(page); |
88 | kunmap_atomic(addr, KM_BIO_SRC_IRQ); | ||
89 | } | 99 | } |
90 | } else { | 100 | } else { |
91 | clear_buffer_uptodate(bh); | 101 | clear_buffer_uptodate(bh); |
92 | SetPageError(page); | 102 | SetPageError(page); |
93 | ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.", | 103 | ntfs_error(ni->vol->sb, "Buffer I/O error, logical block " |
94 | (unsigned long long)bh->b_blocknr); | 104 | "0x%llx.", (unsigned long long)bh->b_blocknr); |
95 | } | 105 | } |
96 | first = page_buffers(page); | 106 | first = page_buffers(page); |
97 | local_irq_save(flags); | 107 | local_irq_save(flags); |
@@ -124,7 +134,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
124 | if (likely(page_uptodate && !PageError(page))) | 134 | if (likely(page_uptodate && !PageError(page))) |
125 | SetPageUptodate(page); | 135 | SetPageUptodate(page); |
126 | } else { | 136 | } else { |
127 | char *addr; | 137 | u8 *kaddr; |
128 | unsigned int i, recs; | 138 | unsigned int i, recs; |
129 | u32 rec_size; | 139 | u32 rec_size; |
130 | 140 | ||
@@ -132,12 +142,12 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
132 | recs = PAGE_CACHE_SIZE / rec_size; | 142 | recs = PAGE_CACHE_SIZE / rec_size; |
133 | /* Should have been verified before we got here... */ | 143 | /* Should have been verified before we got here... */ |
134 | BUG_ON(!recs); | 144 | BUG_ON(!recs); |
135 | addr = kmap_atomic(page, KM_BIO_SRC_IRQ); | 145 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); |
136 | for (i = 0; i < recs; i++) | 146 | for (i = 0; i < recs; i++) |
137 | post_read_mst_fixup((NTFS_RECORD*)(addr + | 147 | post_read_mst_fixup((NTFS_RECORD*)(kaddr + |
138 | i * rec_size), rec_size); | 148 | i * rec_size), rec_size); |
149 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); | ||
139 | flush_dcache_page(page); | 150 | flush_dcache_page(page); |
140 | kunmap_atomic(addr, KM_BIO_SRC_IRQ); | ||
141 | if (likely(page_uptodate && !PageError(page))) | 151 | if (likely(page_uptodate && !PageError(page))) |
142 | SetPageUptodate(page); | 152 | SetPageUptodate(page); |
143 | } | 153 | } |
@@ -168,8 +178,11 @@ still_busy: | |||
168 | */ | 178 | */ |
169 | static int ntfs_read_block(struct page *page) | 179 | static int ntfs_read_block(struct page *page) |
170 | { | 180 | { |
181 | loff_t i_size; | ||
171 | VCN vcn; | 182 | VCN vcn; |
172 | LCN lcn; | 183 | LCN lcn; |
184 | s64 init_size; | ||
185 | struct inode *vi; | ||
173 | ntfs_inode *ni; | 186 | ntfs_inode *ni; |
174 | ntfs_volume *vol; | 187 | ntfs_volume *vol; |
175 | runlist_element *rl; | 188 | runlist_element *rl; |
@@ -180,7 +193,8 @@ static int ntfs_read_block(struct page *page) | |||
180 | int i, nr; | 193 | int i, nr; |
181 | unsigned char blocksize_bits; | 194 | unsigned char blocksize_bits; |
182 | 195 | ||
183 | ni = NTFS_I(page->mapping->host); | 196 | vi = page->mapping->host; |
197 | ni = NTFS_I(vi); | ||
184 | vol = ni->vol; | 198 | vol = ni->vol; |
185 | 199 | ||
186 | /* $MFT/$DATA must have its complete runlist in memory at all times. */ | 200 | /* $MFT/$DATA must have its complete runlist in memory at all times. */ |
@@ -199,11 +213,28 @@ static int ntfs_read_block(struct page *page) | |||
199 | bh = head = page_buffers(page); | 213 | bh = head = page_buffers(page); |
200 | BUG_ON(!bh); | 214 | BUG_ON(!bh); |
201 | 215 | ||
216 | /* | ||
217 | * We may be racing with truncate. To avoid some of the problems we | ||
218 | * now take a snapshot of the various sizes and use those for the whole | ||
219 | * of the function. In case of an extending truncate it just means we | ||
220 | * may leave some buffers unmapped which are now allocated. This is | ||
221 | * not a problem since these buffers will just get mapped when a write | ||
222 | * occurs. In case of a shrinking truncate, we will detect this later | ||
223 | * on due to the runlist being incomplete and if the page is being | ||
224 | * fully truncated, truncate will throw it away as soon as we unlock | ||
225 | * it so no need to worry what we do with it. | ||
226 | */ | ||
202 | iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); | 227 | iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); |
203 | read_lock_irqsave(&ni->size_lock, flags); | 228 | read_lock_irqsave(&ni->size_lock, flags); |
204 | lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; | 229 | lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; |
205 | zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits; | 230 | init_size = ni->initialized_size; |
231 | i_size = i_size_read(vi); | ||
206 | read_unlock_irqrestore(&ni->size_lock, flags); | 232 | read_unlock_irqrestore(&ni->size_lock, flags); |
233 | if (unlikely(init_size > i_size)) { | ||
234 | /* Race with shrinking truncate. */ | ||
235 | init_size = i_size; | ||
236 | } | ||
237 | zblock = (init_size + blocksize - 1) >> blocksize_bits; | ||
207 | 238 | ||
208 | /* Loop through all the buffers in the page. */ | 239 | /* Loop through all the buffers in the page. */ |
209 | rl = NULL; | 240 | rl = NULL; |
@@ -366,6 +397,8 @@ handle_zblock: | |||
366 | */ | 397 | */ |
367 | static int ntfs_readpage(struct file *file, struct page *page) | 398 | static int ntfs_readpage(struct file *file, struct page *page) |
368 | { | 399 | { |
400 | loff_t i_size; | ||
401 | struct inode *vi; | ||
369 | ntfs_inode *ni, *base_ni; | 402 | ntfs_inode *ni, *base_ni; |
370 | u8 *kaddr; | 403 | u8 *kaddr; |
371 | ntfs_attr_search_ctx *ctx; | 404 | ntfs_attr_search_ctx *ctx; |
@@ -384,14 +417,17 @@ retry_readpage: | |||
384 | unlock_page(page); | 417 | unlock_page(page); |
385 | return 0; | 418 | return 0; |
386 | } | 419 | } |
387 | ni = NTFS_I(page->mapping->host); | 420 | vi = page->mapping->host; |
421 | ni = NTFS_I(vi); | ||
388 | /* | 422 | /* |
389 | * Only $DATA attributes can be encrypted and only unnamed $DATA | 423 | * Only $DATA attributes can be encrypted and only unnamed $DATA |
390 | * attributes can be compressed. Index root can have the flags set but | 424 | * attributes can be compressed. Index root can have the flags set but |
391 | * this means to create compressed/encrypted files, not that the | 425 | * this means to create compressed/encrypted files, not that the |
392 | * attribute is compressed/encrypted. | 426 | * attribute is compressed/encrypted. Note we need to check for |
427 | * AT_INDEX_ALLOCATION since this is the type of both directory and | ||
428 | * index inodes. | ||
393 | */ | 429 | */ |
394 | if (ni->type != AT_INDEX_ROOT) { | 430 | if (ni->type != AT_INDEX_ALLOCATION) { |
395 | /* If attribute is encrypted, deny access, just like NT4. */ | 431 | /* If attribute is encrypted, deny access, just like NT4. */ |
396 | if (NInoEncrypted(ni)) { | 432 | if (NInoEncrypted(ni)) { |
397 | BUG_ON(ni->type != AT_DATA); | 433 | BUG_ON(ni->type != AT_DATA); |
@@ -456,7 +492,12 @@ retry_readpage: | |||
456 | read_lock_irqsave(&ni->size_lock, flags); | 492 | read_lock_irqsave(&ni->size_lock, flags); |
457 | if (unlikely(attr_len > ni->initialized_size)) | 493 | if (unlikely(attr_len > ni->initialized_size)) |
458 | attr_len = ni->initialized_size; | 494 | attr_len = ni->initialized_size; |
495 | i_size = i_size_read(vi); | ||
459 | read_unlock_irqrestore(&ni->size_lock, flags); | 496 | read_unlock_irqrestore(&ni->size_lock, flags); |
497 | if (unlikely(attr_len > i_size)) { | ||
498 | /* Race with shrinking truncate. */ | ||
499 | attr_len = i_size; | ||
500 | } | ||
460 | kaddr = kmap_atomic(page, KM_USER0); | 501 | kaddr = kmap_atomic(page, KM_USER0); |
461 | /* Copy the data to the page. */ | 502 | /* Copy the data to the page. */ |
462 | memcpy(kaddr, (u8*)ctx->attr + | 503 | memcpy(kaddr, (u8*)ctx->attr + |
@@ -1341,9 +1382,11 @@ retry_writepage: | |||
1341 | * Only $DATA attributes can be encrypted and only unnamed $DATA | 1382 | * Only $DATA attributes can be encrypted and only unnamed $DATA |
1342 | * attributes can be compressed. Index root can have the flags set but | 1383 | * attributes can be compressed. Index root can have the flags set but |
1343 | * this means to create compressed/encrypted files, not that the | 1384 | * this means to create compressed/encrypted files, not that the |
1344 | * attribute is compressed/encrypted. | 1385 | * attribute is compressed/encrypted. Note we need to check for |
1386 | * AT_INDEX_ALLOCATION since this is the type of both directory and | ||
1387 | * index inodes. | ||
1345 | */ | 1388 | */ |
1346 | if (ni->type != AT_INDEX_ROOT) { | 1389 | if (ni->type != AT_INDEX_ALLOCATION) { |
1347 | /* If file is encrypted, deny access, just like NT4. */ | 1390 | /* If file is encrypted, deny access, just like NT4. */ |
1348 | if (NInoEncrypted(ni)) { | 1391 | if (NInoEncrypted(ni)) { |
1349 | unlock_page(page); | 1392 | unlock_page(page); |
@@ -1379,8 +1422,8 @@ retry_writepage: | |||
1379 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; | 1422 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; |
1380 | kaddr = kmap_atomic(page, KM_USER0); | 1423 | kaddr = kmap_atomic(page, KM_USER0); |
1381 | memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); | 1424 | memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); |
1382 | flush_dcache_page(page); | ||
1383 | kunmap_atomic(kaddr, KM_USER0); | 1425 | kunmap_atomic(kaddr, KM_USER0); |
1426 | flush_dcache_page(page); | ||
1384 | } | 1427 | } |
1385 | /* Handle mst protected attributes. */ | 1428 | /* Handle mst protected attributes. */ |
1386 | if (NInoMstProtected(ni)) | 1429 | if (NInoMstProtected(ni)) |
@@ -1443,34 +1486,33 @@ retry_writepage: | |||
1443 | BUG_ON(PageWriteback(page)); | 1486 | BUG_ON(PageWriteback(page)); |
1444 | set_page_writeback(page); | 1487 | set_page_writeback(page); |
1445 | unlock_page(page); | 1488 | unlock_page(page); |
1446 | /* | ||
1447 | * Here, we do not need to zero the out of bounds area everytime | ||
1448 | * because the below memcpy() already takes care of the | ||
1449 | * mmap-at-end-of-file requirements. If the file is converted to a | ||
1450 | * non-resident one, then the code path use is switched to the | ||
1451 | * non-resident one where the zeroing happens on each ntfs_writepage() | ||
1452 | * invocation. | ||
1453 | */ | ||
1454 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); | 1489 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); |
1455 | i_size = i_size_read(vi); | 1490 | i_size = i_size_read(vi); |
1456 | if (unlikely(attr_len > i_size)) { | 1491 | if (unlikely(attr_len > i_size)) { |
1492 | /* Race with shrinking truncate or a failed truncate. */ | ||
1457 | attr_len = i_size; | 1493 | attr_len = i_size; |
1458 | ctx->attr->data.resident.value_length = cpu_to_le32(attr_len); | 1494 | /* |
1495 | * If the truncate failed, fix it up now. If a concurrent | ||
1496 | * truncate, we do its job, so it does not have to do anything. | ||
1497 | */ | ||
1498 | err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, | ||
1499 | attr_len); | ||
1500 | /* Shrinking cannot fail. */ | ||
1501 | BUG_ON(err); | ||
1459 | } | 1502 | } |
1460 | kaddr = kmap_atomic(page, KM_USER0); | 1503 | kaddr = kmap_atomic(page, KM_USER0); |
1461 | /* Copy the data from the page to the mft record. */ | 1504 | /* Copy the data from the page to the mft record. */ |
1462 | memcpy((u8*)ctx->attr + | 1505 | memcpy((u8*)ctx->attr + |
1463 | le16_to_cpu(ctx->attr->data.resident.value_offset), | 1506 | le16_to_cpu(ctx->attr->data.resident.value_offset), |
1464 | kaddr, attr_len); | 1507 | kaddr, attr_len); |
1465 | flush_dcache_mft_record_page(ctx->ntfs_ino); | ||
1466 | /* Zero out of bounds area in the page cache page. */ | 1508 | /* Zero out of bounds area in the page cache page. */ |
1467 | memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); | 1509 | memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); |
1468 | flush_dcache_page(page); | ||
1469 | kunmap_atomic(kaddr, KM_USER0); | 1510 | kunmap_atomic(kaddr, KM_USER0); |
1470 | 1511 | flush_dcache_mft_record_page(ctx->ntfs_ino); | |
1512 | flush_dcache_page(page); | ||
1513 | /* We are done with the page. */ | ||
1471 | end_page_writeback(page); | 1514 | end_page_writeback(page); |
1472 | 1515 | /* Finally, mark the mft record dirty, so it gets written back. */ | |
1473 | /* Mark the mft record dirty, so it gets written back. */ | ||
1474 | mark_mft_record_dirty(ctx->ntfs_ino); | 1516 | mark_mft_record_dirty(ctx->ntfs_ino); |
1475 | ntfs_attr_put_search_ctx(ctx); | 1517 | ntfs_attr_put_search_ctx(ctx); |
1476 | unmap_mft_record(base_ni); | 1518 | unmap_mft_record(base_ni); |
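
The recurring pattern in the ntfs/aops.c hunks is a single snapshot of initialized_size and i_size taken under the size lock, with init_size clamped to i_size afterwards, so a racing shrinking truncate can never leave the read path zeroing or copying past the file size. A user-space analogue of that snapshot-and-clamp, with a pthread rwlock standing in for the kernel's size_lock:

#include <pthread.h>

struct inode_sizes {
	pthread_rwlock_t lock;
	long long initialized_size;
	long long i_size;
};

static long long snapshot_init_size(struct inode_sizes *s)
{
	long long init_size, i_size;

	pthread_rwlock_rdlock(&s->lock);
	init_size = s->initialized_size;
	i_size = s->i_size;
	pthread_rwlock_unlock(&s->lock);

	if (init_size > i_size)		/* race with a shrinking truncate */
		init_size = i_size;
	return init_size;
}
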
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index dc4bbe3acf5c..7ec045131808 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1166,6 +1166,8 @@ err_out: | |||
1166 | * | 1166 | * |
1167 | * Return 0 on success and -errno on error. In the error case, the inode will | 1167 | * Return 0 on success and -errno on error. In the error case, the inode will |
1168 | * have had make_bad_inode() executed on it. | 1168 | * have had make_bad_inode() executed on it. |
1169 | * | ||
1170 | * Note this cannot be called for AT_INDEX_ALLOCATION. | ||
1169 | */ | 1171 | */ |
1170 | static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | 1172 | static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) |
1171 | { | 1173 | { |
@@ -1242,8 +1244,8 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | |||
1242 | } | 1244 | } |
1243 | } | 1245 | } |
1244 | /* | 1246 | /* |
1245 | * The encryption flag set in an index root just means to | 1247 | * The compressed/sparse flag set in an index root just means |
1246 | * compress all files. | 1248 | * to compress all files. |
1247 | */ | 1249 | */ |
1248 | if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) { | 1250 | if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) { |
1249 | ntfs_error(vi->i_sb, "Found mst protected attribute " | 1251 | ntfs_error(vi->i_sb, "Found mst protected attribute " |
@@ -1319,8 +1321,7 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | |||
1319 | "the mapping pairs array."); | 1321 | "the mapping pairs array."); |
1320 | goto unm_err_out; | 1322 | goto unm_err_out; |
1321 | } | 1323 | } |
1322 | if ((NInoCompressed(ni) || NInoSparse(ni)) && | 1324 | if (NInoCompressed(ni) || NInoSparse(ni)) { |
1323 | ni->type != AT_INDEX_ROOT) { | ||
1324 | if (a->data.non_resident.compression_unit != 4) { | 1325 | if (a->data.non_resident.compression_unit != 4) { |
1325 | ntfs_error(vi->i_sb, "Found nonstandard " | 1326 | ntfs_error(vi->i_sb, "Found nonstandard " |
1326 | "compression unit (%u instead " | 1327 | "compression unit (%u instead " |
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index 3288bcc2c4aa..006946efca8c 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project. | 2 | * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2004 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index f5b2ac929081..061b5ff6b73c 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -2,7 +2,7 @@ | |||
2 | * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project. | 2 | * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2005 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * Copyright (c) 2002 Richard Russon | 5 | * Copyright (c) 2002-2005 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License as published | 8 | * modify it under the terms of the GNU General Public License as published |
@@ -158,17 +158,21 @@ static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst, | |||
158 | BUG_ON(!dst); | 158 | BUG_ON(!dst); |
159 | BUG_ON(!src); | 159 | BUG_ON(!src); |
160 | 160 | ||
161 | if ((dst->lcn < 0) || (src->lcn < 0)) { /* Are we merging holes? */ | 161 | /* We can merge unmapped regions even if they are misaligned. */ |
162 | if (dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE) | 162 | if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED)) |
163 | return TRUE; | 163 | return TRUE; |
164 | /* If the runs are misaligned, we cannot merge them. */ | ||
165 | if ((dst->vcn + dst->length) != src->vcn) | ||
164 | return FALSE; | 166 | return FALSE; |
165 | } | 167 | /* If both runs are non-sparse and contiguous, we can merge them. */ |
166 | if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */ | 168 | if ((dst->lcn >= 0) && (src->lcn >= 0) && |
167 | return FALSE; | 169 | ((dst->lcn + dst->length) == src->lcn)) |
168 | if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */ | 170 | return TRUE; |
169 | return FALSE; | 171 | /* If we are merging two holes, we can merge them. */ |
170 | 172 | if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE)) | |
171 | return TRUE; | 173 | return TRUE; |
174 | /* Cannot merge. */ | ||
175 | return FALSE; | ||
172 | } | 176 | } |
173 | 177 | ||
174 | /** | 178 | /** |
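
The rewritten mergeability test above reads as three ordered rules: two not-mapped runs always merge; otherwise the runs must be VCN-adjacent; and then they merge if they are both real and LCN-contiguous, or both holes. Restated as a stand-alone predicate (LCN_* encodings are illustrative):

#include <stdbool.h>
#include <stdint.h>

enum { LCN_HOLE = -1, LCN_RL_NOT_MAPPED = -2 };	/* illustrative encodings */

struct rl_elem {
	int64_t vcn;	/* virtual cluster number of the run */
	int64_t lcn;	/* logical cluster number, or a special value < 0 */
	int64_t length;	/* run length in clusters */
};

static bool rl_mergeable(const struct rl_elem *dst, const struct rl_elem *src)
{
	/* Unmapped regions merge even when misaligned. */
	if (dst->lcn == LCN_RL_NOT_MAPPED && src->lcn == LCN_RL_NOT_MAPPED)
		return true;
	/* Everything else must be VCN-adjacent... */
	if (dst->vcn + dst->length != src->vcn)
		return false;
	/* ...and either contiguous on disk or both sparse holes. */
	if (dst->lcn >= 0 && src->lcn >= 0 &&
	    dst->lcn + dst->length == src->lcn)
		return true;
	return dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE;
}
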
@@ -214,14 +218,15 @@ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src) | |||
214 | static inline runlist_element *ntfs_rl_append(runlist_element *dst, | 218 | static inline runlist_element *ntfs_rl_append(runlist_element *dst, |
215 | int dsize, runlist_element *src, int ssize, int loc) | 219 | int dsize, runlist_element *src, int ssize, int loc) |
216 | { | 220 | { |
217 | BOOL right; | 221 | BOOL right = FALSE; /* Right end of @src needs merging. */ |
218 | int magic; | 222 | int marker; /* End of the inserted runs. */ |
219 | 223 | ||
220 | BUG_ON(!dst); | 224 | BUG_ON(!dst); |
221 | BUG_ON(!src); | 225 | BUG_ON(!src); |
222 | 226 | ||
223 | /* First, check if the right hand end needs merging. */ | 227 | /* First, check if the right hand end needs merging. */ |
224 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | 228 | if ((loc + 1) < dsize) |
229 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | ||
225 | 230 | ||
226 | /* Space required: @dst size + @src size, less one if we merged. */ | 231 | /* Space required: @dst size + @src size, less one if we merged. */ |
227 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); | 232 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); |
@@ -236,18 +241,19 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst, | |||
236 | if (right) | 241 | if (right) |
237 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); | 242 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); |
238 | 243 | ||
239 | magic = loc + ssize; | 244 | /* First run after the @src runs that have been inserted. */ |
245 | marker = loc + ssize + 1; | ||
240 | 246 | ||
241 | /* Move the tail of @dst out of the way, then copy in @src. */ | 247 | /* Move the tail of @dst out of the way, then copy in @src. */ |
242 | ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right); | 248 | ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right)); |
243 | ntfs_rl_mc(dst, loc + 1, src, 0, ssize); | 249 | ntfs_rl_mc(dst, loc + 1, src, 0, ssize); |
244 | 250 | ||
245 | /* Adjust the size of the preceding hole. */ | 251 | /* Adjust the size of the preceding hole. */ |
246 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; | 252 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; |
247 | 253 | ||
248 | /* We may have changed the length of the file, so fix the end marker */ | 254 | /* We may have changed the length of the file, so fix the end marker */ |
249 | if (dst[magic + 1].lcn == LCN_ENOENT) | 255 | if (dst[marker].lcn == LCN_ENOENT) |
250 | dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length; | 256 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
251 | 257 | ||
252 | return dst; | 258 | return dst; |
253 | } | 259 | } |
@@ -279,18 +285,17 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst, | |||
279 | static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | 285 | static inline runlist_element *ntfs_rl_insert(runlist_element *dst, |
280 | int dsize, runlist_element *src, int ssize, int loc) | 286 | int dsize, runlist_element *src, int ssize, int loc) |
281 | { | 287 | { |
282 | BOOL left = FALSE; | 288 | BOOL left = FALSE; /* Left end of @src needs merging. */ |
283 | BOOL disc = FALSE; /* Discontinuity */ | 289 | BOOL disc = FALSE; /* Discontinuity between @dst and @src. */ |
284 | BOOL hole = FALSE; /* Following a hole */ | 290 | int marker; /* End of the inserted runs. */ |
285 | int magic; | ||
286 | 291 | ||
287 | BUG_ON(!dst); | 292 | BUG_ON(!dst); |
288 | BUG_ON(!src); | 293 | BUG_ON(!src); |
289 | 294 | ||
290 | /* disc => Discontinuity between the end of @dst and the start of @src. | 295 | /* |
291 | * This means we might need to insert a hole. | 296 | * disc => Discontinuity between the end of @dst and the start of @src. |
292 | * hole => @dst ends with a hole or an unmapped region which we can | 297 | * This means we might need to insert a "not mapped" run. |
293 | * extend to match the discontinuity. */ | 298 | */ |
294 | if (loc == 0) | 299 | if (loc == 0) |
295 | disc = (src[0].vcn > 0); | 300 | disc = (src[0].vcn > 0); |
296 | else { | 301 | else { |
@@ -303,58 +308,49 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | |||
303 | merged_length += src->length; | 308 | merged_length += src->length; |
304 | 309 | ||
305 | disc = (src[0].vcn > dst[loc - 1].vcn + merged_length); | 310 | disc = (src[0].vcn > dst[loc - 1].vcn + merged_length); |
306 | if (disc) | ||
307 | hole = (dst[loc - 1].lcn == LCN_HOLE); | ||
308 | } | 311 | } |
309 | 312 | /* | |
310 | /* Space required: @dst size + @src size, less one if we merged, plus | 313 | * Space required: @dst size + @src size, less one if we merged, plus |
311 | * one if there was a discontinuity, less one for a trailing hole. */ | 314 | * one if there was a discontinuity. |
312 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole); | 315 | */ |
316 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc); | ||
313 | if (IS_ERR(dst)) | 317 | if (IS_ERR(dst)) |
314 | return dst; | 318 | return dst; |
315 | /* | 319 | /* |
316 | * We are guaranteed to succeed from here so can start modifying the | 320 | * We are guaranteed to succeed from here so can start modifying the |
317 | * original runlist. | 321 | * original runlist. |
318 | */ | 322 | */ |
319 | |||
320 | if (left) | 323 | if (left) |
321 | __ntfs_rl_merge(dst + loc - 1, src); | 324 | __ntfs_rl_merge(dst + loc - 1, src); |
322 | 325 | /* | |
323 | magic = loc + ssize - left + disc - hole; | 326 | * First run after the @src runs that have been inserted. |
327 | * Nominally, @marker equals @loc + @ssize, i.e. location + number of | ||
328 | * runs in @src. However, if @left, then the first run in @src has | ||
329 | * been merged with one in @dst. And if @disc, then @dst and @src do | ||
330 | * not meet and we need an extra run to fill the gap. | ||
331 | */ | ||
332 | marker = loc + ssize - left + disc; | ||
324 | 333 | ||
325 | /* Move the tail of @dst out of the way, then copy in @src. */ | 334 | /* Move the tail of @dst out of the way, then copy in @src. */ |
326 | ntfs_rl_mm(dst, magic, loc, dsize - loc); | 335 | ntfs_rl_mm(dst, marker, loc, dsize - loc); |
327 | ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left); | 336 | ntfs_rl_mc(dst, loc + disc, src, left, ssize - left); |
328 | 337 | ||
329 | /* Adjust the VCN of the last run ... */ | 338 | /* Adjust the VCN of the first run after the insertion... */ |
330 | if (dst[magic].lcn <= LCN_HOLE) | 339 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
331 | dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length; | ||
332 | /* ... and the length. */ | 340 | /* ... and the length. */ |
333 | if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED) | 341 | if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED) |
334 | dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn; | 342 | dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn; |
335 | 343 | ||
336 | /* Writing beyond the end of the file and there's a discontinuity. */ | 344 | /* Writing beyond the end of the file and there is a discontinuity. */ |
337 | if (disc) { | 345 | if (disc) { |
338 | if (hole) | 346 | if (loc > 0) { |
339 | dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn; | 347 | dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length; |
340 | else { | 348 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; |
341 | if (loc > 0) { | 349 | } else { |
342 | dst[loc].vcn = dst[loc - 1].vcn + | 350 | dst[loc].vcn = 0; |
343 | dst[loc - 1].length; | 351 | dst[loc].length = dst[loc + 1].vcn; |
344 | dst[loc].length = dst[loc + 1].vcn - | ||
345 | dst[loc].vcn; | ||
346 | } else { | ||
347 | dst[loc].vcn = 0; | ||
348 | dst[loc].length = dst[loc + 1].vcn; | ||
349 | } | ||
350 | dst[loc].lcn = LCN_RL_NOT_MAPPED; | ||
351 | } | 352 | } |
352 | 353 | dst[loc].lcn = LCN_RL_NOT_MAPPED; | |
353 | magic += hole; | ||
354 | |||
355 | if (dst[magic].lcn == LCN_ENOENT) | ||
356 | dst[magic].vcn = dst[magic - 1].vcn + | ||
357 | dst[magic - 1].length; | ||
358 | } | 354 | } |
359 | return dst; | 355 | return dst; |
360 | } | 356 | } |
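The hunk above drops the old @hole special case, so both the reallocated size and the position of the first run after the insertion now follow one rule: location plus the runs copied from @src, minus one if the left ends merge, plus one if a "not mapped" filler run has to be created. A throwaway user-space calculation with invented numbers, just to show how @marker and the new size move together:

	#include <stdio.h>

	int main(void)
	{
		int dsize = 5;	/* made-up size of @dst before the insert */
		/* loc: insertion point, ssize: runs in @src,
		 * left: first @src run merged into dst[loc - 1],
		 * disc: gap between @dst and @src needs a "not mapped" filler. */
		struct { int loc, ssize, left, disc; } c[] = {
			{ 0, 2, 0, 1 },	/* insert at the front, gap before @src */
			{ 3, 2, 1, 0 },	/* left merge eats the first @src run   */
			{ 3, 2, 0, 0 },	/* plain insert, no merge, no filler    */
		};

		for (int i = 0; i < 3; i++) {
			int marker  = c[i].loc + c[i].ssize - c[i].left + c[i].disc;
			int newsize = dsize    + c[i].ssize - c[i].left + c[i].disc;

			printf("loc=%d ssize=%d left=%d disc=%d -> marker=%d, realloc to %d\n",
			       c[i].loc, c[i].ssize, c[i].left, c[i].disc,
			       marker, newsize);
		}
		return 0;
	}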
@@ -385,20 +381,23 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | |||
385 | static inline runlist_element *ntfs_rl_replace(runlist_element *dst, | 381 | static inline runlist_element *ntfs_rl_replace(runlist_element *dst, |
386 | int dsize, runlist_element *src, int ssize, int loc) | 382 | int dsize, runlist_element *src, int ssize, int loc) |
387 | { | 383 | { |
388 | BOOL left = FALSE; | 384 | BOOL left = FALSE; /* Left end of @src needs merging. */ |
389 | BOOL right; | 385 | BOOL right = FALSE; /* Right end of @src needs merging. */ |
390 | int magic; | 386 | int tail; /* Start of tail of @dst. */ |
387 | int marker; /* End of the inserted runs. */ | ||
391 | 388 | ||
392 | BUG_ON(!dst); | 389 | BUG_ON(!dst); |
393 | BUG_ON(!src); | 390 | BUG_ON(!src); |
394 | 391 | ||
395 | /* First, merge the left and right ends, if necessary. */ | 392 | /* First, see if the left and right ends need merging. */ |
396 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | 393 | if ((loc + 1) < dsize) |
394 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | ||
397 | if (loc > 0) | 395 | if (loc > 0) |
398 | left = ntfs_are_rl_mergeable(dst + loc - 1, src); | 396 | left = ntfs_are_rl_mergeable(dst + loc - 1, src); |
399 | 397 | /* | |
400 | /* Allocate some space. We'll need less if the left, right, or both | 398 | * Allocate some space. We will need less if the left, right, or both |
401 | * ends were merged. */ | 399 | * ends get merged. |
400 | */ | ||
402 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); | 401 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); |
403 | if (IS_ERR(dst)) | 402 | if (IS_ERR(dst)) |
404 | return dst; | 403 | return dst; |
@@ -406,21 +405,37 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst, | |||
406 | * We are guaranteed to succeed from here so can start modifying the | 405 | * We are guaranteed to succeed from here so can start modifying the |
407 | * original runlists. | 406 | * original runlists. |
408 | */ | 407 | */ |
408 | |||
409 | /* First, merge the left and right ends, if necessary. */ | ||
409 | if (right) | 410 | if (right) |
410 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); | 411 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); |
411 | if (left) | 412 | if (left) |
412 | __ntfs_rl_merge(dst + loc - 1, src); | 413 | __ntfs_rl_merge(dst + loc - 1, src); |
413 | 414 | /* | |
414 | /* FIXME: What does this mean? (AIA) */ | 415 | * Offset of the tail of @dst. This needs to be moved out of the way |
415 | magic = loc + ssize - left; | 416 | * to make space for the runs to be copied from @src, i.e. the first |
417 | * run of the tail of @dst. | ||
418 | * Nominally, @tail equals @loc + 1, i.e. location, skipping the | ||
419 | * replaced run. However, if @right, then one of @dst's runs is | ||
420 | * already merged into @src. | ||
421 | */ | ||
422 | tail = loc + right + 1; | ||
423 | /* | ||
424 | * First run after the @src runs that have been inserted, i.e. where | ||
425 | * the tail of @dst needs to be moved to. | ||
426 | * Nominally, @marker equals @loc + @ssize, i.e. location + number of | ||
427 | * runs in @src. However, if @left, then the first run in @src has | ||
428 | * been merged with one in @dst. | ||
429 | */ | ||
430 | marker = loc + ssize - left; | ||
416 | 431 | ||
417 | /* Move the tail of @dst out of the way, then copy in @src. */ | 432 | /* Move the tail of @dst out of the way, then copy in @src. */ |
418 | ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1); | 433 | ntfs_rl_mm(dst, marker, tail, dsize - tail); |
419 | ntfs_rl_mc(dst, loc, src, left, ssize - left); | 434 | ntfs_rl_mc(dst, loc, src, left, ssize - left); |
420 | 435 | ||
421 | /* We may have changed the length of the file, so fix the end marker */ | 436 | /* We may have changed the length of the file, so fix the end marker. */ |
422 | if (dst[magic].lcn == LCN_ENOENT) | 437 | if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT) |
423 | dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length; | 438 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
424 | return dst; | 439 | return dst; |
425 | } | 440 | } |
426 | 441 | ||
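In ntfs_rl_replace() the former @magic value is split into two named quantities, @tail and @marker, and the end-marker fixup gains a guard for the case where the replaced run was the very last element, so there is no tail left to move. A small user-space sketch with made-up sizes showing both situations:

	#include <stdio.h>

	int main(void)
	{
		struct { int dsize, ssize, loc, left, right; } c[] = {
			{ 4, 2, 1, 0, 1 },	/* last @src run merges with dst[2]          */
			{ 4, 1, 3, 0, 0 },	/* replace the final element: no tail at all */
		};

		for (int i = 0; i < 2; i++) {
			/* First run of @dst's tail that must be moved out of the way. */
			int tail = c[i].loc + c[i].right + 1;
			/* Where that tail lands, i.e. first run after the copied ones. */
			int marker = c[i].loc + c[i].ssize - c[i].left;
			int tail_runs = c[i].dsize - tail;

			printf("dsize=%d ssize=%d loc=%d left=%d right=%d -> "
			       "tail=%d marker=%d tail_runs=%d%s\n",
			       c[i].dsize, c[i].ssize, c[i].loc, c[i].left, c[i].right,
			       tail, marker, tail_runs,
			       tail_runs > 0 ? "" : " (skip end-marker fixup)");
		}
		return 0;
	}

In the second case the old code would have dereferenced dst[marker] past the moved region, which is exactly what the new dsize - tail > 0 guard prevents.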
diff --git a/fs/proc/base.c b/fs/proc/base.c index 23db452ab428..3b33f94020db 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -340,6 +340,54 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf | |||
340 | return result; | 340 | return result; |
341 | } | 341 | } |
342 | 342 | ||
343 | |||
344 | /* Same as proc_root_link, but this additionally tries to get fs from other | ||
345 | * threads in the group */ | ||
346 | static int proc_task_root_link(struct inode *inode, struct dentry **dentry, | ||
347 | struct vfsmount **mnt) | ||
348 | { | ||
349 | struct fs_struct *fs; | ||
350 | int result = -ENOENT; | ||
351 | struct task_struct *leader = proc_task(inode); | ||
352 | |||
353 | task_lock(leader); | ||
354 | fs = leader->fs; | ||
355 | if (fs) { | ||
356 | atomic_inc(&fs->count); | ||
357 | task_unlock(leader); | ||
358 | } else { | ||
359 | /* Try to get fs from other threads */ | ||
360 | task_unlock(leader); | ||
361 | read_lock(&tasklist_lock); | ||
362 | if (pid_alive(leader)) { | ||
363 | struct task_struct *task = leader; | ||
364 | |||
365 | while ((task = next_thread(task)) != leader) { | ||
366 | task_lock(task); | ||
367 | fs = task->fs; | ||
368 | if (fs) { | ||
369 | atomic_inc(&fs->count); | ||
370 | task_unlock(task); | ||
371 | break; | ||
372 | } | ||
373 | task_unlock(task); | ||
374 | } | ||
375 | } | ||
376 | read_unlock(&tasklist_lock); | ||
377 | } | ||
378 | |||
379 | if (fs) { | ||
380 | read_lock(&fs->lock); | ||
381 | *mnt = mntget(fs->rootmnt); | ||
382 | *dentry = dget(fs->root); | ||
383 | read_unlock(&fs->lock); | ||
384 | result = 0; | ||
385 | put_fs_struct(fs); | ||
386 | } | ||
387 | return result; | ||
388 | } | ||
389 | |||
390 | |||
343 | #define MAY_PTRACE(task) \ | 391 | #define MAY_PTRACE(task) \ |
344 | (task == current || \ | 392 | (task == current || \ |
345 | (task->parent == current && \ | 393 | (task->parent == current && \ |
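The loop in proc_task_root_link() is the usual "ask the siblings" fallback: if the group leader has already exited and dropped its fs_struct, borrow the first one still held by another thread in the group, taking a reference before the locks are released. Stripped of the locking, the shape is roughly the following user-space toy; the ring of threads, the refcounted resource and all names are invented for illustration.

	#include <stdio.h>

	struct fsres { int count; };
	struct thr { struct fsres *fs; struct thr *next; };

	/* Borrow @leader's fs if it still has one, otherwise take it from the
	 * first sibling in the ring that does -- same shape as the new helper. */
	static struct fsres *grab_fs(struct thr *leader)
	{
		struct fsres *fs = leader->fs;

		if (!fs) {
			for (struct thr *t = leader->next; t != leader; t = t->next) {
				if (t->fs) {
					fs = t->fs;
					break;
				}
			}
		}
		if (fs)
			fs->count++;		/* the caller now owns a reference */
		return fs;
	}

	int main(void)
	{
		struct fsres shared = { .count = 1 };
		struct thr a = { .fs = NULL }, b = { .fs = NULL }, c = { .fs = &shared };

		a.next = &b; b.next = &c; c.next = &a;	/* thread group as a ring */
		printf("got fs: %s, refcount now %d\n",
		       grab_fs(&a) ? "yes" : "no", shared.count);
		return 0;
	}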
@@ -471,14 +519,14 @@ static int proc_oom_score(struct task_struct *task, char *buffer) | |||
471 | 519 | ||
472 | /* permission checks */ | 520 | /* permission checks */ |
473 | 521 | ||
474 | static int proc_check_root(struct inode *inode) | 522 | /* If the process being read is separated by chroot from the reading process, |
523 | * don't let the reader access the threads. | ||
524 | */ | ||
525 | static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt) | ||
475 | { | 526 | { |
476 | struct dentry *de, *base, *root; | 527 | struct dentry *de, *base; |
477 | struct vfsmount *our_vfsmnt, *vfsmnt, *mnt; | 528 | struct vfsmount *our_vfsmnt, *mnt; |
478 | int res = 0; | 529 | int res = 0; |
479 | |||
480 | if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */ | ||
481 | return -ENOENT; | ||
482 | read_lock(¤t->fs->lock); | 530 | read_lock(¤t->fs->lock); |
483 | our_vfsmnt = mntget(current->fs->rootmnt); | 531 | our_vfsmnt = mntget(current->fs->rootmnt); |
484 | base = dget(current->fs->root); | 532 | base = dget(current->fs->root); |
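proc_check_chroot() answers one question: is the target's root directory visible from, i.e. equal to or below, the reader's root? The kernel decides this by walking dentries and vfsmounts; as a rough user-space analogue, assuming both roots are already canonical absolute paths, the test reduces to a path-prefix check. The function name and paths below are placeholders for the demo only.

	#include <stdio.h>
	#include <string.h>

	/* Return 1 if @target_root is @reader_root or lies underneath it. */
	static int visible_from(const char *reader_root, const char *target_root)
	{
		size_t rlen = strlen(reader_root);

		if (strcmp(reader_root, "/") == 0)
			return 1;			/* reader is not chrooted */
		return strncmp(target_root, reader_root, rlen) == 0 &&
		       (target_root[rlen] == '\0' || target_root[rlen] == '/');
	}

	int main(void)
	{
		/* placeholder paths, purely for the demo */
		printf("%d\n", visible_from("/srv/jail", "/srv/jail/app"));	/* 1 */
		printf("%d\n", visible_from("/srv/jail", "/"));			/* 0 */
		printf("%d\n", visible_from("/", "/srv/jail"));			/* 1 */
		return 0;
	}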
@@ -511,6 +559,16 @@ out: | |||
511 | goto exit; | 559 | goto exit; |
512 | } | 560 | } |
513 | 561 | ||
562 | static int proc_check_root(struct inode *inode) | ||
563 | { | ||
564 | struct dentry *root; | ||
565 | struct vfsmount *vfsmnt; | ||
566 | |||
567 | if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */ | ||
568 | return -ENOENT; | ||
569 | return proc_check_chroot(root, vfsmnt); | ||
570 | } | ||
571 | |||
514 | static int proc_permission(struct inode *inode, int mask, struct nameidata *nd) | 572 | static int proc_permission(struct inode *inode, int mask, struct nameidata *nd) |
515 | { | 573 | { |
516 | if (generic_permission(inode, mask, NULL) != 0) | 574 | if (generic_permission(inode, mask, NULL) != 0) |
@@ -518,6 +576,20 @@ static int proc_permission(struct inode *inode, int mask, struct nameidata *nd) | |||
518 | return proc_check_root(inode); | 576 | return proc_check_root(inode); |
519 | } | 577 | } |
520 | 578 | ||
579 | static int proc_task_permission(struct inode *inode, int mask, struct nameidata *nd) | ||
580 | { | ||
581 | struct dentry *root; | ||
582 | struct vfsmount *vfsmnt; | ||
583 | |||
584 | if (generic_permission(inode, mask, NULL) != 0) | ||
585 | return -EACCES; | ||
586 | |||
587 | if (proc_task_root_link(inode, &root, &vfsmnt)) | ||
588 | return -ENOENT; | ||
589 | |||
590 | return proc_check_chroot(root, vfsmnt); | ||
591 | } | ||
592 | |||
521 | extern struct seq_operations proc_pid_maps_op; | 593 | extern struct seq_operations proc_pid_maps_op; |
522 | static int maps_open(struct inode *inode, struct file *file) | 594 | static int maps_open(struct inode *inode, struct file *file) |
523 | { | 595 | { |
@@ -1419,7 +1491,7 @@ static struct inode_operations proc_fd_inode_operations = { | |||
1419 | 1491 | ||
1420 | static struct inode_operations proc_task_inode_operations = { | 1492 | static struct inode_operations proc_task_inode_operations = { |
1421 | .lookup = proc_task_lookup, | 1493 | .lookup = proc_task_lookup, |
1422 | .permission = proc_permission, | 1494 | .permission = proc_task_permission, |
1423 | }; | 1495 | }; |
1424 | 1496 | ||
1425 | #ifdef CONFIG_SECURITY | 1497 | #ifdef CONFIG_SECURITY |
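With proc_task_permission() wired into proc_task_inode_operations, the /proc/<pid>/task/<tid>/ entries are subject to the same chroot containment as /proc/<pid>/ itself. One way to observe the change is a small probe run from inside a chroot against a process outside it; this is only a sketch, and the pid, the paths and the exact errno returned are assumptions about the environment rather than guarantees.

	#include <stdio.h>
	#include <unistd.h>
	#include <errno.h>
	#include <string.h>

	/* Try to resolve a /proc "root" symlink and report what happens. */
	static void probe(const char *path)
	{
		char buf[256];
		ssize_t n = readlink(path, buf, sizeof(buf) - 1);

		if (n < 0) {
			printf("%-24s -> %s\n", path, strerror(errno));
			return;
		}
		buf[n] = '\0';
		printf("%-24s -> %s\n", path, buf);
	}

	int main(void)
	{
		/* pid 1 is just a convenient target; any pid outside the jail works */
		probe("/proc/1/root");		/* already policed via proc_permission  */
		probe("/proc/1/task/1/root");	/* policed via proc_task_permission now */
		return 0;
	}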