Diffstat (limited to 'fs')
64 files changed, 964 insertions, 823 deletions
diff --git a/fs/9p/conv.c b/fs/9p/conv.c
index 1554731bd653..18121af99d3e 100644
--- a/fs/9p/conv.c
+++ b/fs/9p/conv.c
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * 9P protocol conversion functions | 4 | * 9P protocol conversion functions |
5 | * | 5 | * |
6 | * Copyright (C) 2004, 2005 by Latchesar Ionkov <lucho@ionkov.net> | ||
6 | * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> | 7 | * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> |
7 | * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> | 8 | * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> |
8 | * | 9 | * |
@@ -55,66 +56,70 @@ static inline int buf_check_overflow(struct cbuf *buf) | |||
55 | return buf->p > buf->ep; | 56 | return buf->p > buf->ep; |
56 | } | 57 | } |
57 | 58 | ||
58 | static inline void buf_check_size(struct cbuf *buf, int len) | 59 | static inline int buf_check_size(struct cbuf *buf, int len) |
59 | { | 60 | { |
60 | if (buf->p+len > buf->ep) { | 61 | if (buf->p+len > buf->ep) { |
61 | if (buf->p < buf->ep) { | 62 | if (buf->p < buf->ep) { |
62 | eprintk(KERN_ERR, "buffer overflow\n"); | 63 | eprintk(KERN_ERR, "buffer overflow\n"); |
63 | buf->p = buf->ep + 1; | 64 | buf->p = buf->ep + 1; |
65 | return 0; | ||
64 | } | 66 | } |
65 | } | 67 | } |
68 | |||
69 | return 1; | ||
66 | } | 70 | } |
67 | 71 | ||
68 | static inline void *buf_alloc(struct cbuf *buf, int len) | 72 | static inline void *buf_alloc(struct cbuf *buf, int len) |
69 | { | 73 | { |
70 | void *ret = NULL; | 74 | void *ret = NULL; |
71 | 75 | ||
72 | buf_check_size(buf, len); | 76 | if (buf_check_size(buf, len)) { |
73 | ret = buf->p; | 77 | ret = buf->p; |
74 | buf->p += len; | 78 | buf->p += len; |
79 | } | ||
75 | 80 | ||
76 | return ret; | 81 | return ret; |
77 | } | 82 | } |
78 | 83 | ||
79 | static inline void buf_put_int8(struct cbuf *buf, u8 val) | 84 | static inline void buf_put_int8(struct cbuf *buf, u8 val) |
80 | { | 85 | { |
81 | buf_check_size(buf, 1); | 86 | if (buf_check_size(buf, 1)) { |
82 | 87 | buf->p[0] = val; | |
83 | buf->p[0] = val; | 88 | buf->p++; |
84 | buf->p++; | 89 | } |
85 | } | 90 | } |
86 | 91 | ||
87 | static inline void buf_put_int16(struct cbuf *buf, u16 val) | 92 | static inline void buf_put_int16(struct cbuf *buf, u16 val) |
88 | { | 93 | { |
89 | buf_check_size(buf, 2); | 94 | if (buf_check_size(buf, 2)) { |
90 | 95 | *(__le16 *) buf->p = cpu_to_le16(val); | |
91 | *(__le16 *) buf->p = cpu_to_le16(val); | 96 | buf->p += 2; |
92 | buf->p += 2; | 97 | } |
93 | } | 98 | } |
94 | 99 | ||
95 | static inline void buf_put_int32(struct cbuf *buf, u32 val) | 100 | static inline void buf_put_int32(struct cbuf *buf, u32 val) |
96 | { | 101 | { |
97 | buf_check_size(buf, 4); | 102 | if (buf_check_size(buf, 4)) { |
98 | 103 | *(__le32 *)buf->p = cpu_to_le32(val); | |
99 | *(__le32 *)buf->p = cpu_to_le32(val); | 104 | buf->p += 4; |
100 | buf->p += 4; | 105 | } |
101 | } | 106 | } |
102 | 107 | ||
103 | static inline void buf_put_int64(struct cbuf *buf, u64 val) | 108 | static inline void buf_put_int64(struct cbuf *buf, u64 val) |
104 | { | 109 | { |
105 | buf_check_size(buf, 8); | 110 | if (buf_check_size(buf, 8)) { |
106 | 111 | *(__le64 *)buf->p = cpu_to_le64(val); | |
107 | *(__le64 *)buf->p = cpu_to_le64(val); | 112 | buf->p += 8; |
108 | buf->p += 8; | 113 | } |
109 | } | 114 | } |
110 | 115 | ||
111 | static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) | 116 | static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) |
112 | { | 117 | { |
113 | buf_check_size(buf, slen + 2); | 118 | if (buf_check_size(buf, slen + 2)) { |
114 | 119 | buf_put_int16(buf, slen); | |
115 | buf_put_int16(buf, slen); | 120 | memcpy(buf->p, s, slen); |
116 | memcpy(buf->p, s, slen); | 121 | buf->p += slen; |
117 | buf->p += slen; | 122 | } |
118 | } | 123 | } |
119 | 124 | ||
120 | static inline void buf_put_string(struct cbuf *buf, const char *s) | 125 | static inline void buf_put_string(struct cbuf *buf, const char *s) |
@@ -124,20 +129,20 @@ static inline void buf_put_string(struct cbuf *buf, const char *s) | |||
124 | 129 | ||
125 | static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen) | 130 | static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen) |
126 | { | 131 | { |
127 | buf_check_size(buf, datalen); | 132 | if (buf_check_size(buf, datalen)) { |
128 | 133 | memcpy(buf->p, data, datalen); | |
129 | memcpy(buf->p, data, datalen); | 134 | buf->p += datalen; |
130 | buf->p += datalen; | 135 | } |
131 | } | 136 | } |
132 | 137 | ||
133 | static inline u8 buf_get_int8(struct cbuf *buf) | 138 | static inline u8 buf_get_int8(struct cbuf *buf) |
134 | { | 139 | { |
135 | u8 ret = 0; | 140 | u8 ret = 0; |
136 | 141 | ||
137 | buf_check_size(buf, 1); | 142 | if (buf_check_size(buf, 1)) { |
138 | ret = buf->p[0]; | 143 | ret = buf->p[0]; |
139 | 144 | buf->p++; | |
140 | buf->p++; | 145 | } |
141 | 146 | ||
142 | return ret; | 147 | return ret; |
143 | } | 148 | } |
@@ -146,10 +151,10 @@ static inline u16 buf_get_int16(struct cbuf *buf) | |||
146 | { | 151 | { |
147 | u16 ret = 0; | 152 | u16 ret = 0; |
148 | 153 | ||
149 | buf_check_size(buf, 2); | 154 | if (buf_check_size(buf, 2)) { |
150 | ret = le16_to_cpu(*(__le16 *)buf->p); | 155 | ret = le16_to_cpu(*(__le16 *)buf->p); |
151 | 156 | buf->p += 2; | |
152 | buf->p += 2; | 157 | } |
153 | 158 | ||
154 | return ret; | 159 | return ret; |
155 | } | 160 | } |
@@ -158,10 +163,10 @@ static inline u32 buf_get_int32(struct cbuf *buf) | |||
158 | { | 163 | { |
159 | u32 ret = 0; | 164 | u32 ret = 0; |
160 | 165 | ||
161 | buf_check_size(buf, 4); | 166 | if (buf_check_size(buf, 4)) { |
162 | ret = le32_to_cpu(*(__le32 *)buf->p); | 167 | ret = le32_to_cpu(*(__le32 *)buf->p); |
163 | 168 | buf->p += 4; | |
164 | buf->p += 4; | 169 | } |
165 | 170 | ||
166 | return ret; | 171 | return ret; |
167 | } | 172 | } |
@@ -170,10 +175,10 @@ static inline u64 buf_get_int64(struct cbuf *buf) | |||
170 | { | 175 | { |
171 | u64 ret = 0; | 176 | u64 ret = 0; |
172 | 177 | ||
173 | buf_check_size(buf, 8); | 178 | if (buf_check_size(buf, 8)) { |
174 | ret = le64_to_cpu(*(__le64 *)buf->p); | 179 | ret = le64_to_cpu(*(__le64 *)buf->p); |
175 | 180 | buf->p += 8; | |
176 | buf->p += 8; | 181 | } |
177 | 182 | ||
178 | return ret; | 183 | return ret; |
179 | } | 184 | } |
@@ -181,27 +186,35 @@ static inline u64 buf_get_int64(struct cbuf *buf) | |||
181 | static inline int | 186 | static inline int |
182 | buf_get_string(struct cbuf *buf, char *data, unsigned int datalen) | 187 | buf_get_string(struct cbuf *buf, char *data, unsigned int datalen) |
183 | { | 188 | { |
189 | u16 len = 0; | ||
190 | |||
191 | len = buf_get_int16(buf); | ||
192 | if (!buf_check_overflow(buf) && buf_check_size(buf, len) && len+1>datalen) { | ||
193 | memcpy(data, buf->p, len); | ||
194 | data[len] = 0; | ||
195 | buf->p += len; | ||
196 | len++; | ||
197 | } | ||
184 | 198 | ||
185 | u16 len = buf_get_int16(buf); | 199 | return len; |
186 | buf_check_size(buf, len); | ||
187 | if (len + 1 > datalen) | ||
188 | return 0; | ||
189 | |||
190 | memcpy(data, buf->p, len); | ||
191 | data[len] = 0; | ||
192 | buf->p += len; | ||
193 | |||
194 | return len + 1; | ||
195 | } | 200 | } |
196 | 201 | ||
197 | static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) | 202 | static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) |
198 | { | 203 | { |
199 | char *ret = NULL; | 204 | char *ret; |
200 | int n = buf_get_string(buf, sbuf->p, sbuf->ep - sbuf->p); | 205 | u16 len; |
206 | |||
207 | ret = NULL; | ||
208 | len = buf_get_int16(buf); | ||
201 | 209 | ||
202 | if (n > 0) { | 210 | if (!buf_check_overflow(buf) && buf_check_size(buf, len) && |
211 | buf_check_size(sbuf, len+1)) { | ||
212 | |||
213 | memcpy(sbuf->p, buf->p, len); | ||
214 | sbuf->p[len] = 0; | ||
203 | ret = sbuf->p; | 215 | ret = sbuf->p; |
204 | sbuf->p += n; | 216 | buf->p += len; |
217 | sbuf->p += len + 1; | ||
205 | } | 218 | } |
206 | 219 | ||
207 | return ret; | 220 | return ret; |
@@ -209,12 +222,15 @@ static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) | |||
209 | 222 | ||
210 | static inline int buf_get_data(struct cbuf *buf, void *data, int datalen) | 223 | static inline int buf_get_data(struct cbuf *buf, void *data, int datalen) |
211 | { | 224 | { |
212 | buf_check_size(buf, datalen); | 225 | int ret = 0; |
213 | 226 | ||
214 | memcpy(data, buf->p, datalen); | 227 | if (buf_check_size(buf, datalen)) { |
215 | buf->p += datalen; | 228 | memcpy(data, buf->p, datalen); |
229 | buf->p += datalen; | ||
230 | ret = datalen; | ||
231 | } | ||
216 | 232 | ||
217 | return datalen; | 233 | return ret; |
218 | } | 234 | } |
219 | 235 | ||
220 | static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, | 236 | static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, |
@@ -223,13 +239,12 @@ static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, | |||
223 | char *ret = NULL; | 239 | char *ret = NULL; |
224 | int n = 0; | 240 | int n = 0; |
225 | 241 | ||
226 | buf_check_size(dbuf, datalen); | 242 | if (buf_check_size(dbuf, datalen)) { |
227 | 243 | n = buf_get_data(buf, dbuf->p, datalen); | |
228 | n = buf_get_data(buf, dbuf->p, datalen); | 244 | if (n > 0) { |
229 | 245 | ret = dbuf->p; | |
230 | if (n > 0) { | 246 | dbuf->p += n; |
231 | ret = dbuf->p; | 247 | } |
232 | dbuf->p += n; | ||
233 | } | 248 | } |
234 | 249 | ||
235 | return ret; | 250 | return ret; |
@@ -636,7 +651,7 @@ v9fs_deserialize_fcall(struct v9fs_session_info *v9ses, u32 msgsize, | |||
636 | break; | 651 | break; |
637 | case RWALK: | 652 | case RWALK: |
638 | rcall->params.rwalk.nwqid = buf_get_int16(bufp); | 653 | rcall->params.rwalk.nwqid = buf_get_int16(bufp); |
639 | rcall->params.rwalk.wqids = buf_alloc(bufp, | 654 | rcall->params.rwalk.wqids = buf_alloc(dbufp, |
640 | rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid)); | 655 | rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid)); |
641 | if (rcall->params.rwalk.wqids) | 656 | if (rcall->params.rwalk.wqids) |
642 | for (i = 0; i < rcall->params.rwalk.nwqid; i++) { | 657 | for (i = 0; i < rcall->params.rwalk.nwqid; i++) { |
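A minimal user-space sketch of the checked-buffer pattern these conv.c hunks introduce, for reference: buf_check_size() now reports whether the requested length fits, and every put/get helper touches the buffer only when it does. The struct layout, fprintf() and main() below are stand-ins rather than kernel code, and only one put helper is shown.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's cbuf: p is the cursor, ep is one past the end. */
struct cbuf {
    unsigned char *p;
    unsigned char *ep;
};

/* Returns 1 when len more bytes fit, 0 otherwise -- the same void-to-int
 * change the hunk above makes to the kernel helper. */
static int buf_check_size(struct cbuf *buf, int len)
{
    if (buf->p + len > buf->ep) {
        if (buf->p < buf->ep) {
            fprintf(stderr, "buffer overflow\n");   /* eprintk() in the kernel */
            buf->p = buf->ep + 1;                   /* poison so later checks fail */
        }
        return 0;
    }
    return 1;
}

/* Callers now guard every access on the return value instead of writing blindly. */
static void buf_put_int8(struct cbuf *buf, uint8_t val)
{
    if (buf_check_size(buf, 1)) {
        buf->p[0] = val;
        buf->p++;
    }
}

int main(void)
{
    unsigned char raw[4];
    struct cbuf b = { raw, raw + sizeof(raw) };
    int i;

    for (i = 0; i < 6; i++)     /* the 5th and 6th writes are silently dropped */
        buf_put_int8(&b, (uint8_t)i);
    return 0;
}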
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 821c9c4d76aa..d95f8626d170 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -71,21 +71,28 @@ static int v9fs_fid_insert(struct v9fs_fid *fid, struct dentry *dentry) | |||
71 | * | 71 | * |
72 | */ | 72 | */ |
73 | 73 | ||
74 | struct v9fs_fid *v9fs_fid_create(struct dentry *dentry) | 74 | struct v9fs_fid *v9fs_fid_create(struct dentry *dentry, |
75 | struct v9fs_session_info *v9ses, int fid, int create) | ||
75 | { | 76 | { |
76 | struct v9fs_fid *new; | 77 | struct v9fs_fid *new; |
77 | 78 | ||
79 | dprintk(DEBUG_9P, "fid create dentry %p, fid %d, create %d\n", | ||
80 | dentry, fid, create); | ||
81 | |||
78 | new = kmalloc(sizeof(struct v9fs_fid), GFP_KERNEL); | 82 | new = kmalloc(sizeof(struct v9fs_fid), GFP_KERNEL); |
79 | if (new == NULL) { | 83 | if (new == NULL) { |
80 | dprintk(DEBUG_ERROR, "Out of Memory\n"); | 84 | dprintk(DEBUG_ERROR, "Out of Memory\n"); |
81 | return ERR_PTR(-ENOMEM); | 85 | return ERR_PTR(-ENOMEM); |
82 | } | 86 | } |
83 | 87 | ||
84 | new->fid = -1; | 88 | new->fid = fid; |
89 | new->v9ses = v9ses; | ||
85 | new->fidopen = 0; | 90 | new->fidopen = 0; |
86 | new->fidcreate = 0; | 91 | new->fidcreate = create; |
87 | new->fidclunked = 0; | 92 | new->fidclunked = 0; |
88 | new->iounit = 0; | 93 | new->iounit = 0; |
94 | new->rdir_pos = 0; | ||
95 | new->rdir_fcall = NULL; | ||
89 | 96 | ||
90 | if (v9fs_fid_insert(new, dentry) == 0) | 97 | if (v9fs_fid_insert(new, dentry) == 0) |
91 | return new; | 98 | return new; |
@@ -109,6 +116,59 @@ void v9fs_fid_destroy(struct v9fs_fid *fid) | |||
109 | } | 116 | } |
110 | 117 | ||
111 | /** | 118 | /** |
119 | * v9fs_fid_walk_up - walks from the process current directory | ||
120 | * up to the specified dentry. | ||
121 | */ | ||
122 | static struct v9fs_fid *v9fs_fid_walk_up(struct dentry *dentry) | ||
123 | { | ||
124 | int fidnum, cfidnum, err; | ||
125 | struct v9fs_fid *cfid; | ||
126 | struct dentry *cde; | ||
127 | struct v9fs_session_info *v9ses; | ||
128 | |||
129 | v9ses = v9fs_inode2v9ses(current->fs->pwd->d_inode); | ||
130 | cfid = v9fs_fid_lookup(current->fs->pwd); | ||
131 | if (cfid == NULL) { | ||
132 | dprintk(DEBUG_ERROR, "process cwd doesn't have a fid\n"); | ||
133 | return ERR_PTR(-ENOENT); | ||
134 | } | ||
135 | |||
136 | cfidnum = cfid->fid; | ||
137 | cde = current->fs->pwd; | ||
138 | /* TODO: take advantage of multiwalk */ | ||
139 | |||
140 | fidnum = v9fs_get_idpool(&v9ses->fidpool); | ||
141 | if (fidnum < 0) { | ||
142 | dprintk(DEBUG_ERROR, "could not get a new fid num\n"); | ||
143 | err = -ENOENT; | ||
144 | goto clunk_fid; | ||
145 | } | ||
146 | |||
147 | while (cde != dentry) { | ||
148 | if (cde == cde->d_parent) { | ||
149 | dprintk(DEBUG_ERROR, "can't find dentry\n"); | ||
150 | err = -ENOENT; | ||
151 | goto clunk_fid; | ||
152 | } | ||
153 | |||
154 | err = v9fs_t_walk(v9ses, cfidnum, fidnum, "..", NULL); | ||
155 | if (err < 0) { | ||
156 | dprintk(DEBUG_ERROR, "problem walking to parent\n"); | ||
157 | goto clunk_fid; | ||
158 | } | ||
159 | |||
160 | cfidnum = fidnum; | ||
161 | cde = cde->d_parent; | ||
162 | } | ||
163 | |||
164 | return v9fs_fid_create(dentry, v9ses, fidnum, 0); | ||
165 | |||
166 | clunk_fid: | ||
167 | v9fs_t_clunk(v9ses, fidnum, NULL); | ||
168 | return ERR_PTR(err); | ||
169 | } | ||
170 | |||
171 | /** | ||
112 | * v9fs_fid_lookup - retrieve the right fid from a particular dentry | 172 | * v9fs_fid_lookup - retrieve the right fid from a particular dentry |
113 | * @dentry: dentry to look for fid in | 173 | * @dentry: dentry to look for fid in |
114 | * @type: intent of lookup (operation or traversal) | 174 | * @type: intent of lookup (operation or traversal) |
@@ -119,49 +179,25 @@ void v9fs_fid_destroy(struct v9fs_fid *fid) | |||
119 | * | 179 | * |
120 | */ | 180 | */ |
121 | 181 | ||
122 | struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type) | 182 | struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry) |
123 | { | 183 | { |
124 | struct list_head *fid_list = (struct list_head *)dentry->d_fsdata; | 184 | struct list_head *fid_list = (struct list_head *)dentry->d_fsdata; |
125 | struct v9fs_fid *current_fid = NULL; | 185 | struct v9fs_fid *current_fid = NULL; |
126 | struct v9fs_fid *temp = NULL; | 186 | struct v9fs_fid *temp = NULL; |
127 | struct v9fs_fid *return_fid = NULL; | 187 | struct v9fs_fid *return_fid = NULL; |
128 | int found_parent = 0; | ||
129 | int found_user = 0; | ||
130 | 188 | ||
131 | dprintk(DEBUG_9P, " dentry: %s (%p) type %d\n", dentry->d_iname, dentry, | 189 | dprintk(DEBUG_9P, " dentry: %s (%p)\n", dentry->d_iname, dentry); |
132 | type); | ||
133 | 190 | ||
134 | if (fid_list && !list_empty(fid_list)) { | 191 | if (fid_list) { |
135 | list_for_each_entry_safe(current_fid, temp, fid_list, list) { | 192 | list_for_each_entry_safe(current_fid, temp, fid_list, list) { |
136 | if (current_fid->uid == current->uid) { | 193 | if (!current_fid->fidcreate) { |
137 | if (return_fid == NULL) { | 194 | return_fid = current_fid; |
138 | if ((type == FID_OP) | 195 | break; |
139 | || (!current_fid->fidopen)) { | ||
140 | return_fid = current_fid; | ||
141 | found_user = 1; | ||
142 | } | ||
143 | } | ||
144 | } | ||
145 | if (current_fid->pid == current->real_parent->pid) { | ||
146 | if ((return_fid == NULL) || (found_parent) | ||
147 | || (found_user)) { | ||
148 | if ((type == FID_OP) | ||
149 | || (!current_fid->fidopen)) { | ||
150 | return_fid = current_fid; | ||
151 | found_parent = 1; | ||
152 | found_user = 0; | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | if (current_fid->pid == current->pid) { | ||
157 | if ((type == FID_OP) || | ||
158 | (!current_fid->fidopen)) { | ||
159 | return_fid = current_fid; | ||
160 | found_parent = 0; | ||
161 | found_user = 0; | ||
162 | } | ||
163 | } | 196 | } |
164 | } | 197 | } |
198 | |||
199 | if (!return_fid) | ||
200 | return_fid = current_fid; | ||
165 | } | 201 | } |
166 | 202 | ||
167 | /* we are at the root but didn't match */ | 203 | /* we are at the root but didn't match */ |
@@ -187,55 +223,33 @@ struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type) | |||
187 | 223 | ||
188 | /* XXX - there may be some duplication we can get rid of */ | 224 | /* XXX - there may be some duplication we can get rid of */ |
189 | if (par == dentry) { | 225 | if (par == dentry) { |
190 | /* we need to fid_lookup the starting point */ | 226 | return_fid = v9fs_fid_walk_up(dentry); |
191 | int fidnum = -1; | 227 | if (IS_ERR(return_fid)) |
192 | int oldfid = -1; | 228 | return_fid = NULL; |
193 | int result = -1; | 229 | } |
194 | struct v9fs_session_info *v9ses = | 230 | } |
195 | v9fs_inode2v9ses(current->fs->pwd->d_inode); | ||
196 | |||
197 | current_fid = | ||
198 | v9fs_fid_lookup(current->fs->pwd, FID_WALK); | ||
199 | if (current_fid == NULL) { | ||
200 | dprintk(DEBUG_ERROR, | ||
201 | "process cwd doesn't have a fid\n"); | ||
202 | return return_fid; | ||
203 | } | ||
204 | oldfid = current_fid->fid; | ||
205 | par = current->fs->pwd; | ||
206 | /* TODO: take advantage of multiwalk */ | ||
207 | 231 | ||
208 | fidnum = v9fs_get_idpool(&v9ses->fidpool); | 232 | return return_fid; |
209 | if (fidnum < 0) { | 233 | } |
210 | dprintk(DEBUG_ERROR, | ||
211 | "could not get a new fid num\n"); | ||
212 | return return_fid; | ||
213 | } | ||
214 | 234 | ||
215 | while (par != dentry) { | 235 | struct v9fs_fid *v9fs_fid_get_created(struct dentry *dentry) |
216 | result = | 236 | { |
217 | v9fs_t_walk(v9ses, oldfid, fidnum, "..", | 237 | struct list_head *fid_list; |
218 | NULL); | 238 | struct v9fs_fid *fid, *ftmp, *ret; |
219 | if (result < 0) { | 239 | |
220 | dprintk(DEBUG_ERROR, | 240 | dprintk(DEBUG_9P, " dentry: %s (%p)\n", dentry->d_iname, dentry); |
221 | "problem walking to parent\n"); | 241 | fid_list = (struct list_head *)dentry->d_fsdata; |
222 | 242 | ret = NULL; | |
223 | break; | 243 | if (fid_list) { |
224 | } | 244 | list_for_each_entry_safe(fid, ftmp, fid_list, list) { |
225 | oldfid = fidnum; | 245 | if (fid->fidcreate && fid->pid == current->pid) { |
226 | if (par == par->d_parent) { | 246 | list_del(&fid->list); |
227 | dprintk(DEBUG_ERROR, | 247 | ret = fid; |
228 | "can't find dentry\n"); | 248 | break; |
229 | break; | ||
230 | } | ||
231 | par = par->d_parent; | ||
232 | } | ||
233 | if (par == dentry) { | ||
234 | return_fid = v9fs_fid_create(dentry); | ||
235 | return_fid->fid = fidnum; | ||
236 | } | 249 | } |
237 | } | 250 | } |
238 | } | 251 | } |
239 | 252 | ||
240 | return return_fid; | 253 | dprintk(DEBUG_9P, "return %p\n", ret); |
254 | return ret; | ||
241 | } | 255 | } |
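In practice the fid.c rework means callers stop passing a lookup type and instead ask for a create-time fid first, falling back to the plain per-dentry lookup; this is exactly what the vfs_file.c hunk further down does. A rough sketch of that caller pattern (pick_fid_for_open is an illustrative name, not part of the patch):

/* Illustrative only -- not added by the patch.  After this change an open
 * path first looks for the fid stashed at create time (matched on the
 * current pid), and otherwise takes the first non-create fid on the dentry. */
static struct v9fs_fid *pick_fid_for_open(struct dentry *dentry)
{
    struct v9fs_fid *fid;

    fid = v9fs_fid_get_created(dentry);
    if (!fid)
        fid = v9fs_fid_lookup(dentry);

    return fid;     /* may be NULL; v9fs_file_open returns -EBADF then */
}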
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index 7db478ccca36..84c673a44c83 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #define FID_OP 0 | 26 | #define FID_OP 0 |
27 | #define FID_WALK 1 | 27 | #define FID_WALK 1 |
28 | #define FID_CREATE 2 | ||
28 | 29 | ||
29 | struct v9fs_fid { | 30 | struct v9fs_fid { |
30 | struct list_head list; /* list of fids associated with a dentry */ | 31 | struct list_head list; /* list of fids associated with a dentry */ |
@@ -52,6 +53,8 @@ struct v9fs_fid { | |||
52 | struct v9fs_session_info *v9ses; /* session info for this FID */ | 53 | struct v9fs_session_info *v9ses; /* session info for this FID */ |
53 | }; | 54 | }; |
54 | 55 | ||
55 | struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type); | 56 | struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry); |
57 | struct v9fs_fid *v9fs_fid_get_created(struct dentry *); | ||
56 | void v9fs_fid_destroy(struct v9fs_fid *fid); | 58 | void v9fs_fid_destroy(struct v9fs_fid *fid); |
57 | struct v9fs_fid *v9fs_fid_create(struct dentry *); | 59 | struct v9fs_fid *v9fs_fid_create(struct dentry *, |
60 | struct v9fs_session_info *v9ses, int fid, int create); | ||
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 13bdbbab4387..82303f3bf76f 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -303,7 +303,13 @@ v9fs_session_init(struct v9fs_session_info *v9ses, | |||
303 | goto SessCleanUp; | 303 | goto SessCleanUp; |
304 | }; | 304 | }; |
305 | 305 | ||
306 | v9ses->transport = trans_proto; | 306 | v9ses->transport = kmalloc(sizeof(*v9ses->transport), GFP_KERNEL); |
307 | if (!v9ses->transport) { | ||
308 | retval = -ENOMEM; | ||
309 | goto SessCleanUp; | ||
310 | } | ||
311 | |||
312 | memmove(v9ses->transport, trans_proto, sizeof(*v9ses->transport)); | ||
307 | 313 | ||
308 | if ((retval = v9ses->transport->init(v9ses, dev_name, data)) < 0) { | 314 | if ((retval = v9ses->transport->init(v9ses, dev_name, data)) < 0) { |
309 | eprintk(KERN_ERR, "problem initializing transport\n"); | 315 | eprintk(KERN_ERR, "problem initializing transport\n"); |
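The v9fs.c hunk stops pointing every session at the shared trans_proto template; each session now owns a heap copy, so transport state cannot leak between mounts. A stand-alone sketch of that clone-the-template idea, using illustrative names (transport_ops, tcp_template) rather than the kernel's:

#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the kernel's transport descriptor. */
struct transport_ops {
    int (*init)(void *priv, const char *dev, void *data);
    void *priv;                 /* per-session state lives here */
};

static const struct transport_ops tcp_template = { 0 };    /* plays the role of trans_proto */

static struct transport_ops *session_clone_transport(void)
{
    struct transport_ops *t = malloc(sizeof(*t));

    if (!t)
        return NULL;            /* the kernel path returns -ENOMEM */

    memcpy(t, &tcp_template, sizeof(*t));   /* memmove() in the hunk above */
    return t;                   /* caller owns (and later frees) the copy */
}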
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 306c96741f81..a6aa947de0f9 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -67,7 +67,7 @@ static int v9fs_dentry_validate(struct dentry *dentry, struct nameidata *nd) | |||
67 | struct dentry *dc = current->fs->pwd; | 67 | struct dentry *dc = current->fs->pwd; |
68 | 68 | ||
69 | dprintk(DEBUG_VFS, "dentry: %s (%p)\n", dentry->d_iname, dentry); | 69 | dprintk(DEBUG_VFS, "dentry: %s (%p)\n", dentry->d_iname, dentry); |
70 | if (v9fs_fid_lookup(dentry, FID_OP)) { | 70 | if (v9fs_fid_lookup(dentry)) { |
71 | dprintk(DEBUG_VFS, "VALID\n"); | 71 | dprintk(DEBUG_VFS, "VALID\n"); |
72 | return 1; | 72 | return 1; |
73 | } | 73 | } |
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index c478a7384186..57a43b8feef5 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -197,21 +197,18 @@ int v9fs_dir_release(struct inode *inode, struct file *filp) | |||
197 | filemap_fdatawait(inode->i_mapping); | 197 | filemap_fdatawait(inode->i_mapping); |
198 | 198 | ||
199 | if (fidnum >= 0) { | 199 | if (fidnum >= 0) { |
200 | fid->fidopen--; | ||
201 | dprintk(DEBUG_VFS, "fidopen: %d v9f->fid: %d\n", fid->fidopen, | 200 | dprintk(DEBUG_VFS, "fidopen: %d v9f->fid: %d\n", fid->fidopen, |
202 | fid->fid); | 201 | fid->fid); |
203 | 202 | ||
204 | if (fid->fidopen == 0) { | 203 | if (v9fs_t_clunk(v9ses, fidnum, NULL)) |
205 | if (v9fs_t_clunk(v9ses, fidnum, NULL)) | 204 | dprintk(DEBUG_ERROR, "clunk failed\n"); |
206 | dprintk(DEBUG_ERROR, "clunk failed\n"); | ||
207 | 205 | ||
208 | v9fs_put_idpool(fid->fid, &v9ses->fidpool); | 206 | v9fs_put_idpool(fid->fid, &v9ses->fidpool); |
209 | } | ||
210 | 207 | ||
211 | kfree(fid->rdir_fcall); | 208 | kfree(fid->rdir_fcall); |
209 | kfree(fid); | ||
212 | 210 | ||
213 | filp->private_data = NULL; | 211 | filp->private_data = NULL; |
214 | v9fs_fid_destroy(fid); | ||
215 | } | 212 | } |
216 | 213 | ||
217 | d_drop(filp->f_dentry); | 214 | d_drop(filp->f_dentry); |
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 1f8ae7d580ab..bbc3cc63854f 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -53,30 +53,36 @@ | |||
53 | int v9fs_file_open(struct inode *inode, struct file *file) | 53 | int v9fs_file_open(struct inode *inode, struct file *file) |
54 | { | 54 | { |
55 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); | 55 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); |
56 | struct v9fs_fid *v9fid = v9fs_fid_lookup(file->f_dentry, FID_WALK); | 56 | struct v9fs_fid *v9fid, *fid; |
57 | struct v9fs_fid *v9newfid = NULL; | ||
58 | struct v9fs_fcall *fcall = NULL; | 57 | struct v9fs_fcall *fcall = NULL; |
59 | int open_mode = 0; | 58 | int open_mode = 0; |
60 | unsigned int iounit = 0; | 59 | unsigned int iounit = 0; |
61 | int newfid = -1; | 60 | int newfid = -1; |
62 | long result = -1; | 61 | long result = -1; |
63 | 62 | ||
64 | dprintk(DEBUG_VFS, "inode: %p file: %p v9fid= %p\n", inode, file, | 63 | dprintk(DEBUG_VFS, "inode: %p file: %p \n", inode, file); |
65 | v9fid); | 64 | |
65 | v9fid = v9fs_fid_get_created(file->f_dentry); | ||
66 | if (!v9fid) | ||
67 | v9fid = v9fs_fid_lookup(file->f_dentry); | ||
66 | 68 | ||
67 | if (!v9fid) { | 69 | if (!v9fid) { |
68 | struct dentry *dentry = file->f_dentry; | ||
69 | dprintk(DEBUG_ERROR, "Couldn't resolve fid from dentry\n"); | 70 | dprintk(DEBUG_ERROR, "Couldn't resolve fid from dentry\n"); |
71 | return -EBADF; | ||
72 | } | ||
70 | 73 | ||
71 | /* XXX - some duplication from lookup, generalize later */ | 74 | if (!v9fid->fidcreate) { |
72 | /* basically vfs_lookup is too heavy weight */ | 75 | fid = kmalloc(sizeof(struct v9fs_fid), GFP_KERNEL); |
73 | v9fid = v9fs_fid_lookup(file->f_dentry, FID_OP); | 76 | if (fid == NULL) { |
74 | if (!v9fid) | 77 | dprintk(DEBUG_ERROR, "Out of Memory\n"); |
75 | return -EBADF; | 78 | return -ENOMEM; |
79 | } | ||
76 | 80 | ||
77 | v9fid = v9fs_fid_lookup(dentry->d_parent, FID_WALK); | 81 | fid->fidopen = 0; |
78 | if (!v9fid) | 82 | fid->fidcreate = 0; |
79 | return -EBADF; | 83 | fid->fidclunked = 0; |
84 | fid->iounit = 0; | ||
85 | fid->v9ses = v9ses; | ||
80 | 86 | ||
81 | newfid = v9fs_get_idpool(&v9ses->fidpool); | 87 | newfid = v9fs_get_idpool(&v9ses->fidpool); |
82 | if (newfid < 0) { | 88 | if (newfid < 0) { |
@@ -85,58 +91,16 @@ int v9fs_file_open(struct inode *inode, struct file *file) | |||
85 | } | 91 | } |
86 | 92 | ||
87 | result = | 93 | result = |
88 | v9fs_t_walk(v9ses, v9fid->fid, newfid, | 94 | v9fs_t_walk(v9ses, v9fid->fid, newfid, NULL, NULL); |
89 | (char *)file->f_dentry->d_name.name, NULL); | 95 | |
90 | if (result < 0) { | 96 | if (result < 0) { |
91 | v9fs_put_idpool(newfid, &v9ses->fidpool); | 97 | v9fs_put_idpool(newfid, &v9ses->fidpool); |
92 | dprintk(DEBUG_ERROR, "rewalk didn't work\n"); | 98 | dprintk(DEBUG_ERROR, "rewalk didn't work\n"); |
93 | return -EBADF; | 99 | return -EBADF; |
94 | } | 100 | } |
95 | 101 | ||
96 | v9fid = v9fs_fid_create(dentry); | 102 | fid->fid = newfid; |
97 | if (v9fid == NULL) { | 103 | v9fid = fid; |
98 | dprintk(DEBUG_ERROR, "couldn't insert\n"); | ||
99 | return -ENOMEM; | ||
100 | } | ||
101 | v9fid->fid = newfid; | ||
102 | } | ||
103 | |||
104 | if (v9fid->fidcreate) { | ||
105 | /* create case */ | ||
106 | newfid = v9fid->fid; | ||
107 | iounit = v9fid->iounit; | ||
108 | v9fid->fidcreate = 0; | ||
109 | } else { | ||
110 | if (!S_ISDIR(inode->i_mode)) | ||
111 | newfid = v9fid->fid; | ||
112 | else { | ||
113 | newfid = v9fs_get_idpool(&v9ses->fidpool); | ||
114 | if (newfid < 0) { | ||
115 | eprintk(KERN_WARNING, "allocation failed\n"); | ||
116 | return -ENOSPC; | ||
117 | } | ||
118 | /* This would be a somewhat critical clone */ | ||
119 | result = | ||
120 | v9fs_t_walk(v9ses, v9fid->fid, newfid, NULL, | ||
121 | &fcall); | ||
122 | if (result < 0) { | ||
123 | dprintk(DEBUG_ERROR, "clone error: %s\n", | ||
124 | FCALL_ERROR(fcall)); | ||
125 | kfree(fcall); | ||
126 | return result; | ||
127 | } | ||
128 | |||
129 | v9newfid = v9fs_fid_create(file->f_dentry); | ||
130 | v9newfid->fid = newfid; | ||
131 | v9newfid->qid = v9fid->qid; | ||
132 | v9newfid->iounit = v9fid->iounit; | ||
133 | v9newfid->fidopen = 0; | ||
134 | v9newfid->fidclunked = 0; | ||
135 | v9newfid->v9ses = v9ses; | ||
136 | v9fid = v9newfid; | ||
137 | kfree(fcall); | ||
138 | } | ||
139 | |||
140 | /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */ | 104 | /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */ |
141 | /* translate open mode appropriately */ | 105 | /* translate open mode appropriately */ |
142 | open_mode = file->f_flags & 0x3; | 106 | open_mode = file->f_flags & 0x3; |
@@ -163,9 +127,13 @@ int v9fs_file_open(struct inode *inode, struct file *file) | |||
163 | 127 | ||
164 | iounit = fcall->params.ropen.iounit; | 128 | iounit = fcall->params.ropen.iounit; |
165 | kfree(fcall); | 129 | kfree(fcall); |
130 | } else { | ||
131 | /* create case */ | ||
132 | newfid = v9fid->fid; | ||
133 | iounit = v9fid->iounit; | ||
134 | v9fid->fidcreate = 0; | ||
166 | } | 135 | } |
167 | 136 | ||
168 | |||
169 | file->private_data = v9fid; | 137 | file->private_data = v9fid; |
170 | 138 | ||
171 | v9fid->rdir_pos = 0; | 139 | v9fid->rdir_pos = 0; |
@@ -207,16 +175,16 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl) | |||
207 | } | 175 | } |
208 | 176 | ||
209 | /** | 177 | /** |
210 | * v9fs_read - read from a file (internal) | 178 | * v9fs_file_read - read from a file |
211 | * @filep: file pointer to read | 179 | * @filep: file pointer to read |
212 | * @data: data buffer to read data into | 180 | * @data: data buffer to read data into |
213 | * @count: size of buffer | 181 | * @count: size of buffer |
214 | * @offset: offset at which to read data | 182 | * @offset: offset at which to read data |
215 | * | 183 | * |
216 | */ | 184 | */ |
217 | |||
218 | static ssize_t | 185 | static ssize_t |
219 | v9fs_read(struct file *filp, char *buffer, size_t count, loff_t * offset) | 186 | v9fs_file_read(struct file *filp, char __user * data, size_t count, |
187 | loff_t * offset) | ||
220 | { | 188 | { |
221 | struct inode *inode = filp->f_dentry->d_inode; | 189 | struct inode *inode = filp->f_dentry->d_inode; |
222 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); | 190 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); |
@@ -226,6 +194,7 @@ v9fs_read(struct file *filp, char *buffer, size_t count, loff_t * offset) | |||
226 | int rsize = 0; | 194 | int rsize = 0; |
227 | int result = 0; | 195 | int result = 0; |
228 | int total = 0; | 196 | int total = 0; |
197 | int n; | ||
229 | 198 | ||
230 | dprintk(DEBUG_VFS, "\n"); | 199 | dprintk(DEBUG_VFS, "\n"); |
231 | 200 | ||
@@ -248,10 +217,15 @@ v9fs_read(struct file *filp, char *buffer, size_t count, loff_t * offset) | |||
248 | } else | 217 | } else |
249 | *offset += result; | 218 | *offset += result; |
250 | 219 | ||
251 | /* XXX - extra copy */ | 220 | n = copy_to_user(data, fcall->params.rread.data, result); |
252 | memcpy(buffer, fcall->params.rread.data, result); | 221 | if (n) { |
222 | dprintk(DEBUG_ERROR, "Problem copying to user %d\n", n); | ||
223 | kfree(fcall); | ||
224 | return -EFAULT; | ||
225 | } | ||
226 | |||
253 | count -= result; | 227 | count -= result; |
254 | buffer += result; | 228 | data += result; |
255 | total += result; | 229 | total += result; |
256 | 230 | ||
257 | kfree(fcall); | 231 | kfree(fcall); |
@@ -264,42 +238,7 @@ v9fs_read(struct file *filp, char *buffer, size_t count, loff_t * offset) | |||
264 | } | 238 | } |
265 | 239 | ||
266 | /** | 240 | /** |
267 | * v9fs_file_read - read from a file | 241 | * v9fs_file_write - write to a file |
268 | * @filep: file pointer to read | ||
269 | * @data: data buffer to read data into | ||
270 | * @count: size of buffer | ||
271 | * @offset: offset at which to read data | ||
272 | * | ||
273 | */ | ||
274 | |||
275 | static ssize_t | ||
276 | v9fs_file_read(struct file *filp, char __user * data, size_t count, | ||
277 | loff_t * offset) | ||
278 | { | ||
279 | int retval = -1; | ||
280 | int ret = 0; | ||
281 | char *buffer; | ||
282 | |||
283 | buffer = kmalloc(count, GFP_KERNEL); | ||
284 | if (!buffer) | ||
285 | return -ENOMEM; | ||
286 | |||
287 | retval = v9fs_read(filp, buffer, count, offset); | ||
288 | if (retval > 0) { | ||
289 | if ((ret = copy_to_user(data, buffer, retval)) != 0) { | ||
290 | dprintk(DEBUG_ERROR, "Problem copying to user %d\n", | ||
291 | ret); | ||
292 | retval = ret; | ||
293 | } | ||
294 | } | ||
295 | |||
296 | kfree(buffer); | ||
297 | |||
298 | return retval; | ||
299 | } | ||
300 | |||
301 | /** | ||
302 | * v9fs_write - write to a file | ||
303 | * @filep: file pointer to write | 242 | * @filep: file pointer to write |
304 | * @data: data buffer to write data from | 243 | * @data: data buffer to write data from |
305 | * @count: size of buffer | 244 | * @count: size of buffer |
@@ -308,7 +247,8 @@ v9fs_file_read(struct file *filp, char __user * data, size_t count, | |||
308 | */ | 247 | */ |
309 | 248 | ||
310 | static ssize_t | 249 | static ssize_t |
311 | v9fs_write(struct file *filp, char *buffer, size_t count, loff_t * offset) | 250 | v9fs_file_write(struct file *filp, const char __user * data, |
251 | size_t count, loff_t * offset) | ||
312 | { | 252 | { |
313 | struct inode *inode = filp->f_dentry->d_inode; | 253 | struct inode *inode = filp->f_dentry->d_inode; |
314 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); | 254 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); |
@@ -318,30 +258,42 @@ v9fs_write(struct file *filp, char *buffer, size_t count, loff_t * offset) | |||
318 | int result = -EIO; | 258 | int result = -EIO; |
319 | int rsize = 0; | 259 | int rsize = 0; |
320 | int total = 0; | 260 | int total = 0; |
261 | char *buf; | ||
321 | 262 | ||
322 | dprintk(DEBUG_VFS, "data %p count %d offset %x\n", buffer, (int)count, | 263 | dprintk(DEBUG_VFS, "data %p count %d offset %x\n", data, (int)count, |
323 | (int)*offset); | 264 | (int)*offset); |
324 | rsize = v9ses->maxdata - V9FS_IOHDRSZ; | 265 | rsize = v9ses->maxdata - V9FS_IOHDRSZ; |
325 | if (v9fid->iounit != 0 && rsize > v9fid->iounit) | 266 | if (v9fid->iounit != 0 && rsize > v9fid->iounit) |
326 | rsize = v9fid->iounit; | 267 | rsize = v9fid->iounit; |
327 | 268 | ||
328 | dump_data(buffer, count); | 269 | buf = kmalloc(v9ses->maxdata - V9FS_IOHDRSZ, GFP_KERNEL); |
270 | if (!buf) | ||
271 | return -ENOMEM; | ||
329 | 272 | ||
330 | do { | 273 | do { |
331 | if (count < rsize) | 274 | if (count < rsize) |
332 | rsize = count; | 275 | rsize = count; |
333 | 276 | ||
334 | result = | 277 | result = copy_from_user(buf, data, rsize); |
335 | v9fs_t_write(v9ses, fid, *offset, rsize, buffer, &fcall); | 278 | if (result) { |
279 | dprintk(DEBUG_ERROR, "Problem copying from user\n"); | ||
280 | kfree(buf); | ||
281 | return -EFAULT; | ||
282 | } | ||
283 | |||
284 | dump_data(buf, rsize); | ||
285 | result = v9fs_t_write(v9ses, fid, *offset, rsize, buf, &fcall); | ||
336 | if (result < 0) { | 286 | if (result < 0) { |
337 | eprintk(KERN_ERR, "error while writing: %s(%d)\n", | 287 | eprintk(KERN_ERR, "error while writing: %s(%d)\n", |
338 | FCALL_ERROR(fcall), result); | 288 | FCALL_ERROR(fcall), result); |
339 | kfree(fcall); | 289 | kfree(fcall); |
290 | kfree(buf); | ||
340 | return result; | 291 | return result; |
341 | } else | 292 | } else |
342 | *offset += result; | 293 | *offset += result; |
343 | 294 | ||
344 | kfree(fcall); | 295 | kfree(fcall); |
296 | fcall = NULL; | ||
345 | 297 | ||
346 | if (result != rsize) { | 298 | if (result != rsize) { |
347 | eprintk(KERN_ERR, | 299 | eprintk(KERN_ERR, |
@@ -351,46 +303,14 @@ v9fs_write(struct file *filp, char *buffer, size_t count, loff_t * offset) | |||
351 | } | 303 | } |
352 | 304 | ||
353 | count -= result; | 305 | count -= result; |
354 | buffer += result; | 306 | data += result; |
355 | total += result; | 307 | total += result; |
356 | } while (count); | 308 | } while (count); |
357 | 309 | ||
310 | kfree(buf); | ||
358 | return total; | 311 | return total; |
359 | } | 312 | } |
360 | 313 | ||
361 | /** | ||
362 | * v9fs_file_write - write to a file | ||
363 | * @filep: file pointer to write | ||
364 | * @data: data buffer to write data from | ||
365 | * @count: size of buffer | ||
366 | * @offset: offset at which to write data | ||
367 | * | ||
368 | */ | ||
369 | |||
370 | static ssize_t | ||
371 | v9fs_file_write(struct file *filp, const char __user * data, | ||
372 | size_t count, loff_t * offset) | ||
373 | { | ||
374 | int ret = -1; | ||
375 | char *buffer; | ||
376 | |||
377 | buffer = kmalloc(count, GFP_KERNEL); | ||
378 | if (buffer == NULL) | ||
379 | return -ENOMEM; | ||
380 | |||
381 | ret = copy_from_user(buffer, data, count); | ||
382 | if (ret) { | ||
383 | dprintk(DEBUG_ERROR, "Problem copying from user\n"); | ||
384 | ret = -EFAULT; | ||
385 | } else { | ||
386 | ret = v9fs_write(filp, buffer, count, offset); | ||
387 | } | ||
388 | |||
389 | kfree(buffer); | ||
390 | |||
391 | return ret; | ||
392 | } | ||
393 | |||
394 | struct file_operations v9fs_file_operations = { | 314 | struct file_operations v9fs_file_operations = { |
395 | .llseek = generic_file_llseek, | 315 | .llseek = generic_file_llseek, |
396 | .read = v9fs_file_read, | 316 | .read = v9fs_file_read, |
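With v9fs_write folded into v9fs_file_write, the write path no longer allocates a buffer the size of the whole user request: it allocates one buffer bounded by maxdata - V9FS_IOHDRSZ (or iounit) and loops over chunks. A user-space sketch of that chunking loop, with send_chunk() and memcpy() standing in for v9fs_t_write() and copy_from_user():

#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Stand-in for the transport call; pretend every chunk is written in full. */
static ssize_t send_chunk(const char *buf, size_t len)
{
    return (ssize_t)len;
}

static ssize_t chunked_write(const char *user_data, size_t count, size_t rsize)
{
    char *buf = malloc(rsize);      /* one bounded buffer, reused per chunk */
    size_t total = 0;

    if (!buf)
        return -1;                  /* -ENOMEM in the kernel */

    while (count) {
        size_t n = count < rsize ? count : rsize;
        ssize_t written;

        memcpy(buf, user_data, n);  /* copy_from_user() in the hunk above */
        written = send_chunk(buf, n);
        if (written < 0) {          /* hard error */
            free(buf);
            return written;
        }

        user_data += written;
        count -= (size_t)written;
        total += (size_t)written;

        if ((size_t)written != n)   /* short write: stop, as the hunk does */
            break;
    }

    free(buf);
    return (ssize_t)total;
}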
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 0c13fc600049..2b696ae6655a 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -307,7 +307,7 @@ v9fs_create(struct inode *dir, | |||
307 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); | 307 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); |
308 | struct super_block *sb = dir->i_sb; | 308 | struct super_block *sb = dir->i_sb; |
309 | struct v9fs_fid *dirfid = | 309 | struct v9fs_fid *dirfid = |
310 | v9fs_fid_lookup(file_dentry->d_parent, FID_WALK); | 310 | v9fs_fid_lookup(file_dentry->d_parent); |
311 | struct v9fs_fid *fid = NULL; | 311 | struct v9fs_fid *fid = NULL; |
312 | struct inode *file_inode = NULL; | 312 | struct inode *file_inode = NULL; |
313 | struct v9fs_fcall *fcall = NULL; | 313 | struct v9fs_fcall *fcall = NULL; |
@@ -317,6 +317,7 @@ v9fs_create(struct inode *dir, | |||
317 | long newfid = -1; | 317 | long newfid = -1; |
318 | int result = 0; | 318 | int result = 0; |
319 | unsigned int iounit = 0; | 319 | unsigned int iounit = 0; |
320 | int wfidno = -1; | ||
320 | 321 | ||
321 | perm = unixmode2p9mode(v9ses, perm); | 322 | perm = unixmode2p9mode(v9ses, perm); |
322 | 323 | ||
@@ -350,7 +351,7 @@ v9fs_create(struct inode *dir, | |||
350 | if (result < 0) { | 351 | if (result < 0) { |
351 | dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall)); | 352 | dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall)); |
352 | v9fs_put_idpool(newfid, &v9ses->fidpool); | 353 | v9fs_put_idpool(newfid, &v9ses->fidpool); |
353 | newfid = 0; | 354 | newfid = -1; |
354 | goto CleanUpFid; | 355 | goto CleanUpFid; |
355 | } | 356 | } |
356 | 357 | ||
@@ -369,20 +370,39 @@ v9fs_create(struct inode *dir, | |||
369 | qid = fcall->params.rcreate.qid; | 370 | qid = fcall->params.rcreate.qid; |
370 | kfree(fcall); | 371 | kfree(fcall); |
371 | 372 | ||
372 | fid = v9fs_fid_create(file_dentry); | 373 | fid = v9fs_fid_create(file_dentry, v9ses, newfid, 1); |
374 | dprintk(DEBUG_VFS, "fid %p %d\n", fid, fid->fidcreate); | ||
373 | if (!fid) { | 375 | if (!fid) { |
374 | result = -ENOMEM; | 376 | result = -ENOMEM; |
375 | goto CleanUpFid; | 377 | goto CleanUpFid; |
376 | } | 378 | } |
377 | 379 | ||
378 | fid->fid = newfid; | ||
379 | fid->fidopen = 0; | ||
380 | fid->fidcreate = 1; | ||
381 | fid->qid = qid; | 380 | fid->qid = qid; |
382 | fid->iounit = iounit; | 381 | fid->iounit = iounit; |
383 | fid->rdir_pos = 0; | 382 | |
384 | fid->rdir_fcall = NULL; | 383 | /* walk to the newly created file and put the fid in the dentry */ |
385 | fid->v9ses = v9ses; | 384 | wfidno = v9fs_get_idpool(&v9ses->fidpool); |
385 | if (newfid < 0) { | ||
386 | eprintk(KERN_WARNING, "no free fids available\n"); | ||
387 | return -ENOSPC; | ||
388 | } | ||
389 | |||
390 | result = v9fs_t_walk(v9ses, dirfidnum, wfidno, | ||
391 | (char *) file_dentry->d_name.name, NULL); | ||
392 | if (result < 0) { | ||
393 | dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall)); | ||
394 | v9fs_put_idpool(wfidno, &v9ses->fidpool); | ||
395 | wfidno = -1; | ||
396 | goto CleanUpFid; | ||
397 | } | ||
398 | |||
399 | if (!v9fs_fid_create(file_dentry, v9ses, wfidno, 0)) { | ||
400 | if (!v9fs_t_clunk(v9ses, newfid, &fcall)) { | ||
401 | v9fs_put_idpool(wfidno, &v9ses->fidpool); | ||
402 | } | ||
403 | |||
404 | goto CleanUpFid; | ||
405 | } | ||
386 | 406 | ||
387 | if ((perm & V9FS_DMSYMLINK) || (perm & V9FS_DMLINK) || | 407 | if ((perm & V9FS_DMSYMLINK) || (perm & V9FS_DMLINK) || |
388 | (perm & V9FS_DMNAMEDPIPE) || (perm & V9FS_DMSOCKET) || | 408 | (perm & V9FS_DMNAMEDPIPE) || (perm & V9FS_DMSOCKET) || |
@@ -410,11 +430,11 @@ v9fs_create(struct inode *dir, | |||
410 | d_instantiate(file_dentry, file_inode); | 430 | d_instantiate(file_dentry, file_inode); |
411 | 431 | ||
412 | if (perm & V9FS_DMDIR) { | 432 | if (perm & V9FS_DMDIR) { |
413 | if (v9fs_t_clunk(v9ses, newfid, &fcall)) | 433 | if (!v9fs_t_clunk(v9ses, newfid, &fcall)) |
434 | v9fs_put_idpool(newfid, &v9ses->fidpool); | ||
435 | else | ||
414 | dprintk(DEBUG_ERROR, "clunk for mkdir failed: %s\n", | 436 | dprintk(DEBUG_ERROR, "clunk for mkdir failed: %s\n", |
415 | FCALL_ERROR(fcall)); | 437 | FCALL_ERROR(fcall)); |
416 | |||
417 | v9fs_put_idpool(newfid, &v9ses->fidpool); | ||
418 | kfree(fcall); | 438 | kfree(fcall); |
419 | fid->fidopen = 0; | 439 | fid->fidopen = 0; |
420 | fid->fidcreate = 0; | 440 | fid->fidcreate = 0; |
@@ -426,12 +446,22 @@ v9fs_create(struct inode *dir, | |||
426 | CleanUpFid: | 446 | CleanUpFid: |
427 | kfree(fcall); | 447 | kfree(fcall); |
428 | 448 | ||
429 | if (newfid) { | 449 | if (newfid >= 0) { |
430 | if (v9fs_t_clunk(v9ses, newfid, &fcall)) | 450 | if (!v9fs_t_clunk(v9ses, newfid, &fcall)) |
451 | v9fs_put_idpool(newfid, &v9ses->fidpool); | ||
452 | else | ||
453 | dprintk(DEBUG_ERROR, "clunk failed: %s\n", | ||
454 | FCALL_ERROR(fcall)); | ||
455 | |||
456 | kfree(fcall); | ||
457 | } | ||
458 | if (wfidno >= 0) { | ||
459 | if (!v9fs_t_clunk(v9ses, wfidno, &fcall)) | ||
460 | v9fs_put_idpool(wfidno, &v9ses->fidpool); | ||
461 | else | ||
431 | dprintk(DEBUG_ERROR, "clunk failed: %s\n", | 462 | dprintk(DEBUG_ERROR, "clunk failed: %s\n", |
432 | FCALL_ERROR(fcall)); | 463 | FCALL_ERROR(fcall)); |
433 | 464 | ||
434 | v9fs_put_idpool(newfid, &v9ses->fidpool); | ||
435 | kfree(fcall); | 465 | kfree(fcall); |
436 | } | 466 | } |
437 | return result; | 467 | return result; |
@@ -461,7 +491,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir) | |||
461 | file_inode = file->d_inode; | 491 | file_inode = file->d_inode; |
462 | sb = file_inode->i_sb; | 492 | sb = file_inode->i_sb; |
463 | v9ses = v9fs_inode2v9ses(file_inode); | 493 | v9ses = v9fs_inode2v9ses(file_inode); |
464 | v9fid = v9fs_fid_lookup(file, FID_OP); | 494 | v9fid = v9fs_fid_lookup(file); |
465 | 495 | ||
466 | if (!v9fid) { | 496 | if (!v9fid) { |
467 | dprintk(DEBUG_ERROR, | 497 | dprintk(DEBUG_ERROR, |
@@ -545,7 +575,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, | |||
545 | 575 | ||
546 | sb = dir->i_sb; | 576 | sb = dir->i_sb; |
547 | v9ses = v9fs_inode2v9ses(dir); | 577 | v9ses = v9fs_inode2v9ses(dir); |
548 | dirfid = v9fs_fid_lookup(dentry->d_parent, FID_WALK); | 578 | dirfid = v9fs_fid_lookup(dentry->d_parent); |
549 | 579 | ||
550 | if (!dirfid) { | 580 | if (!dirfid) { |
551 | dprintk(DEBUG_ERROR, "no dirfid\n"); | 581 | dprintk(DEBUG_ERROR, "no dirfid\n"); |
@@ -573,7 +603,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, | |||
573 | v9fs_put_idpool(newfid, &v9ses->fidpool); | 603 | v9fs_put_idpool(newfid, &v9ses->fidpool); |
574 | if (result == -ENOENT) { | 604 | if (result == -ENOENT) { |
575 | d_add(dentry, NULL); | 605 | d_add(dentry, NULL); |
576 | dprintk(DEBUG_ERROR, | 606 | dprintk(DEBUG_VFS, |
577 | "Return negative dentry %p count %d\n", | 607 | "Return negative dentry %p count %d\n", |
578 | dentry, atomic_read(&dentry->d_count)); | 608 | dentry, atomic_read(&dentry->d_count)); |
579 | return NULL; | 609 | return NULL; |
@@ -601,16 +631,13 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, | |||
601 | 631 | ||
602 | inode->i_ino = v9fs_qid2ino(&fcall->params.rstat.stat->qid); | 632 | inode->i_ino = v9fs_qid2ino(&fcall->params.rstat.stat->qid); |
603 | 633 | ||
604 | fid = v9fs_fid_create(dentry); | 634 | fid = v9fs_fid_create(dentry, v9ses, newfid, 0); |
605 | if (fid == NULL) { | 635 | if (fid == NULL) { |
606 | dprintk(DEBUG_ERROR, "couldn't insert\n"); | 636 | dprintk(DEBUG_ERROR, "couldn't insert\n"); |
607 | result = -ENOMEM; | 637 | result = -ENOMEM; |
608 | goto FreeFcall; | 638 | goto FreeFcall; |
609 | } | 639 | } |
610 | 640 | ||
611 | fid->fid = newfid; | ||
612 | fid->fidopen = 0; | ||
613 | fid->v9ses = v9ses; | ||
614 | fid->qid = fcall->params.rstat.stat->qid; | 641 | fid->qid = fcall->params.rstat.stat->qid; |
615 | 642 | ||
616 | dentry->d_op = &v9fs_dentry_operations; | 643 | dentry->d_op = &v9fs_dentry_operations; |
@@ -665,11 +692,11 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
665 | { | 692 | { |
666 | struct inode *old_inode = old_dentry->d_inode; | 693 | struct inode *old_inode = old_dentry->d_inode; |
667 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(old_inode); | 694 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(old_inode); |
668 | struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry, FID_WALK); | 695 | struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry); |
669 | struct v9fs_fid *olddirfid = | 696 | struct v9fs_fid *olddirfid = |
670 | v9fs_fid_lookup(old_dentry->d_parent, FID_WALK); | 697 | v9fs_fid_lookup(old_dentry->d_parent); |
671 | struct v9fs_fid *newdirfid = | 698 | struct v9fs_fid *newdirfid = |
672 | v9fs_fid_lookup(new_dentry->d_parent, FID_WALK); | 699 | v9fs_fid_lookup(new_dentry->d_parent); |
673 | struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); | 700 | struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); |
674 | struct v9fs_fcall *fcall = NULL; | 701 | struct v9fs_fcall *fcall = NULL; |
675 | int fid = -1; | 702 | int fid = -1; |
@@ -744,7 +771,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | |||
744 | { | 771 | { |
745 | struct v9fs_fcall *fcall = NULL; | 772 | struct v9fs_fcall *fcall = NULL; |
746 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); | 773 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); |
747 | struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); | 774 | struct v9fs_fid *fid = v9fs_fid_lookup(dentry); |
748 | int err = -EPERM; | 775 | int err = -EPERM; |
749 | 776 | ||
750 | dprintk(DEBUG_VFS, "dentry: %p\n", dentry); | 777 | dprintk(DEBUG_VFS, "dentry: %p\n", dentry); |
@@ -778,7 +805,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | |||
778 | static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) | 805 | static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) |
779 | { | 806 | { |
780 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); | 807 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); |
781 | struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); | 808 | struct v9fs_fid *fid = v9fs_fid_lookup(dentry); |
782 | struct v9fs_fcall *fcall = NULL; | 809 | struct v9fs_fcall *fcall = NULL; |
783 | struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); | 810 | struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); |
784 | int res = -EPERM; | 811 | int res = -EPERM; |
@@ -960,7 +987,7 @@ v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | |||
960 | if (retval != 0) | 987 | if (retval != 0) |
961 | goto FreeFcall; | 988 | goto FreeFcall; |
962 | 989 | ||
963 | newfid = v9fs_fid_lookup(dentry, FID_OP); | 990 | newfid = v9fs_fid_lookup(dentry); |
964 | 991 | ||
965 | /* issue a twstat */ | 992 | /* issue a twstat */ |
966 | v9fs_blank_mistat(v9ses, mistat); | 993 | v9fs_blank_mistat(v9ses, mistat); |
@@ -1004,7 +1031,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen) | |||
1004 | 1031 | ||
1005 | struct v9fs_fcall *fcall = NULL; | 1032 | struct v9fs_fcall *fcall = NULL; |
1006 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); | 1033 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); |
1007 | struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); | 1034 | struct v9fs_fid *fid = v9fs_fid_lookup(dentry); |
1008 | 1035 | ||
1009 | if (!fid) { | 1036 | if (!fid) { |
1010 | dprintk(DEBUG_ERROR, "could not resolve fid from dentry\n"); | 1037 | dprintk(DEBUG_ERROR, "could not resolve fid from dentry\n"); |
@@ -1063,8 +1090,8 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer, | |||
1063 | int ret; | 1090 | int ret; |
1064 | char *link = __getname(); | 1091 | char *link = __getname(); |
1065 | 1092 | ||
1066 | if (strlen(link) < buflen) | 1093 | if (buflen > PATH_MAX) |
1067 | buflen = strlen(link); | 1094 | buflen = PATH_MAX; |
1068 | 1095 | ||
1069 | dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); | 1096 | dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); |
1070 | 1097 | ||
@@ -1148,7 +1175,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, | |||
1148 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); | 1175 | struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); |
1149 | struct v9fs_fcall *fcall = NULL; | 1176 | struct v9fs_fcall *fcall = NULL; |
1150 | struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); | 1177 | struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); |
1151 | struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry, FID_OP); | 1178 | struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry); |
1152 | struct v9fs_fid *newfid = NULL; | 1179 | struct v9fs_fid *newfid = NULL; |
1153 | char *symname = __getname(); | 1180 | char *symname = __getname(); |
1154 | 1181 | ||
@@ -1168,7 +1195,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, | |||
1168 | if (retval != 0) | 1195 | if (retval != 0) |
1169 | goto FreeMem; | 1196 | goto FreeMem; |
1170 | 1197 | ||
1171 | newfid = v9fs_fid_lookup(dentry, FID_OP); | 1198 | newfid = v9fs_fid_lookup(dentry); |
1172 | if (!newfid) { | 1199 | if (!newfid) { |
1173 | dprintk(DEBUG_ERROR, "couldn't resolve fid from dentry\n"); | 1200 | dprintk(DEBUG_ERROR, "couldn't resolve fid from dentry\n"); |
1174 | goto FreeMem; | 1201 | goto FreeMem; |
@@ -1246,7 +1273,7 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) | |||
1246 | if (retval != 0) | 1273 | if (retval != 0) |
1247 | goto FreeMem; | 1274 | goto FreeMem; |
1248 | 1275 | ||
1249 | newfid = v9fs_fid_lookup(dentry, FID_OP); | 1276 | newfid = v9fs_fid_lookup(dentry); |
1250 | if (!newfid) { | 1277 | if (!newfid) { |
1251 | dprintk(DEBUG_ERROR, "coudn't resove fid from dentry\n"); | 1278 | dprintk(DEBUG_ERROR, "coudn't resove fid from dentry\n"); |
1252 | retval = -EINVAL; | 1279 | retval = -EINVAL; |
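The corrected cleanup paths in v9fs_create above (and in the mkdir branch) recycle a fid number only when the clunk actually succeeded. As a sketch, the rule those error paths now follow (drop_fid is an illustrative helper, not something the patch adds):

/* Illustrative only: return the fid number to the pool only if the server
 * acknowledged the clunk (v9fs_t_clunk() returning 0); on failure the number
 * stays burned and the error is logged, exactly as the hunks above do. */
static void drop_fid(struct v9fs_session_info *v9ses, int fidnum)
{
    struct v9fs_fcall *fcall = NULL;

    if (!v9fs_t_clunk(v9ses, fidnum, &fcall))
        v9fs_put_idpool(fidnum, &v9ses->fidpool);
    else
        dprintk(DEBUG_ERROR, "clunk failed: %s\n", FCALL_ERROR(fcall));

    kfree(fcall);
}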
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 868f350b2c5f..82c5b0084079 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -129,8 +129,7 @@ static struct super_block *v9fs_get_sb(struct file_system_type | |||
129 | 129 | ||
130 | if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) { | 130 | if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) { |
131 | dprintk(DEBUG_ERROR, "problem initiating session\n"); | 131 | dprintk(DEBUG_ERROR, "problem initiating session\n"); |
132 | retval = newfid; | 132 | return ERR_PTR(newfid); |
133 | goto free_session; | ||
134 | } | 133 | } |
135 | 134 | ||
136 | sb = sget(fs_type, NULL, v9fs_set_super, v9ses); | 135 | sb = sget(fs_type, NULL, v9fs_set_super, v9ses); |
@@ -150,28 +149,24 @@ static struct super_block *v9fs_get_sb(struct file_system_type | |||
150 | 149 | ||
151 | if (!root) { | 150 | if (!root) { |
152 | retval = -ENOMEM; | 151 | retval = -ENOMEM; |
153 | goto release_inode; | 152 | goto put_back_sb; |
154 | } | 153 | } |
155 | 154 | ||
156 | sb->s_root = root; | 155 | sb->s_root = root; |
157 | 156 | ||
158 | /* Setup the Root Inode */ | ||
159 | root_fid = v9fs_fid_create(root); | ||
160 | if (root_fid == NULL) { | ||
161 | retval = -ENOMEM; | ||
162 | goto release_dentry; | ||
163 | } | ||
164 | |||
165 | root_fid->fidopen = 0; | ||
166 | root_fid->v9ses = v9ses; | ||
167 | |||
168 | stat_result = v9fs_t_stat(v9ses, newfid, &fcall); | 157 | stat_result = v9fs_t_stat(v9ses, newfid, &fcall); |
169 | if (stat_result < 0) { | 158 | if (stat_result < 0) { |
170 | dprintk(DEBUG_ERROR, "stat error\n"); | 159 | dprintk(DEBUG_ERROR, "stat error\n"); |
171 | v9fs_t_clunk(v9ses, newfid, NULL); | 160 | v9fs_t_clunk(v9ses, newfid, NULL); |
172 | v9fs_put_idpool(newfid, &v9ses->fidpool); | 161 | v9fs_put_idpool(newfid, &v9ses->fidpool); |
173 | } else { | 162 | } else { |
174 | root_fid->fid = newfid; | 163 | /* Setup the Root Inode */ |
164 | root_fid = v9fs_fid_create(root, v9ses, newfid, 0); | ||
165 | if (root_fid == NULL) { | ||
166 | retval = -ENOMEM; | ||
167 | goto put_back_sb; | ||
168 | } | ||
169 | |||
175 | root_fid->qid = fcall->params.rstat.stat->qid; | 170 | root_fid->qid = fcall->params.rstat.stat->qid; |
176 | root->d_inode->i_ino = | 171 | root->d_inode->i_ino = |
177 | v9fs_qid2ino(&fcall->params.rstat.stat->qid); | 172 | v9fs_qid2ino(&fcall->params.rstat.stat->qid); |
@@ -182,25 +177,15 @@ static struct super_block *v9fs_get_sb(struct file_system_type | |||
182 | 177 | ||
183 | if (stat_result < 0) { | 178 | if (stat_result < 0) { |
184 | retval = stat_result; | 179 | retval = stat_result; |
185 | goto release_dentry; | 180 | goto put_back_sb; |
186 | } | 181 | } |
187 | 182 | ||
188 | return sb; | 183 | return sb; |
189 | 184 | ||
190 | release_dentry: | 185 | put_back_sb: |
191 | dput(sb->s_root); | 186 | /* deactivate_super calls v9fs_kill_super which will frees the rest */ |
192 | |||
193 | release_inode: | ||
194 | iput(inode); | ||
195 | |||
196 | put_back_sb: | ||
197 | up_write(&sb->s_umount); | 187 | up_write(&sb->s_umount); |
198 | deactivate_super(sb); | 188 | deactivate_super(sb); |
199 | v9fs_session_close(v9ses); | ||
200 | |||
201 | free_session: | ||
202 | kfree(v9ses); | ||
203 | |||
204 | return ERR_PTR(retval); | 189 | return ERR_PTR(retval); |
205 | } | 190 | } |
206 | 191 | ||
diff --git a/fs/Kconfig b/fs/Kconfig
index 068ccea2f184..48f5422cb19a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -472,6 +472,9 @@ config FUSE_FS | |||
472 | utilities is available from the FUSE homepage: | 472 | utilities is available from the FUSE homepage: |
473 | <http://fuse.sourceforge.net/> | 473 | <http://fuse.sourceforge.net/> |
474 | 474 | ||
475 | See <file:Documentation/filesystems/fuse.txt> for more information. | ||
476 | See <file:Documentation/Changes> for needed library/utility version. | ||
477 | |||
475 | If you want to develop a userspace FS, or if you want to use | 478 | If you want to develop a userspace FS, or if you want to use |
476 | a filesystem based on FUSE, answer Y or M. | 479 | a filesystem based on FUSE, answer Y or M. |
477 | 480 | ||
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -398,7 +398,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx) | |||
398 | if (unlikely(!req)) | 398 | if (unlikely(!req)) |
399 | return NULL; | 399 | return NULL; |
400 | 400 | ||
401 | req->ki_flags = 1 << KIF_LOCKED; | 401 | req->ki_flags = 0; |
402 | req->ki_users = 2; | 402 | req->ki_users = 2; |
403 | req->ki_key = 0; | 403 | req->ki_key = 0; |
404 | req->ki_ctx = ctx; | 404 | req->ki_ctx = ctx; |
@@ -547,24 +547,6 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id) | |||
547 | return ioctx; | 547 | return ioctx; |
548 | } | 548 | } |
549 | 549 | ||
550 | static int lock_kiocb_action(void *param) | ||
551 | { | ||
552 | schedule(); | ||
553 | return 0; | ||
554 | } | ||
555 | |||
556 | static inline void lock_kiocb(struct kiocb *iocb) | ||
557 | { | ||
558 | wait_on_bit_lock(&iocb->ki_flags, KIF_LOCKED, lock_kiocb_action, | ||
559 | TASK_UNINTERRUPTIBLE); | ||
560 | } | ||
561 | |||
562 | static inline void unlock_kiocb(struct kiocb *iocb) | ||
563 | { | ||
564 | kiocbClearLocked(iocb); | ||
565 | wake_up_bit(&iocb->ki_flags, KIF_LOCKED); | ||
566 | } | ||
567 | |||
568 | /* | 550 | /* |
569 | * use_mm | 551 | * use_mm |
570 | * Makes the calling kernel thread take on the specified | 552 | * Makes the calling kernel thread take on the specified |
@@ -740,19 +722,9 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) | |||
740 | ret = retry(iocb); | 722 | ret = retry(iocb); |
741 | current->io_wait = NULL; | 723 | current->io_wait = NULL; |
742 | 724 | ||
743 | if (-EIOCBRETRY != ret) { | 725 | if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) { |
744 | if (-EIOCBQUEUED != ret) { | 726 | BUG_ON(!list_empty(&iocb->ki_wait.task_list)); |
745 | BUG_ON(!list_empty(&iocb->ki_wait.task_list)); | 727 | aio_complete(iocb, ret, 0); |
746 | aio_complete(iocb, ret, 0); | ||
747 | /* must not access the iocb after this */ | ||
748 | } | ||
749 | } else { | ||
750 | /* | ||
751 | * Issue an additional retry to avoid waiting forever if | ||
752 | * no waits were queued (e.g. in case of a short read). | ||
753 | */ | ||
754 | if (list_empty(&iocb->ki_wait.task_list)) | ||
755 | kiocbSetKicked(iocb); | ||
756 | } | 728 | } |
757 | out: | 729 | out: |
758 | spin_lock_irq(&ctx->ctx_lock); | 730 | spin_lock_irq(&ctx->ctx_lock); |
@@ -805,9 +777,7 @@ static int __aio_run_iocbs(struct kioctx *ctx) | |||
805 | * Hold an extra reference while retrying i/o. | 777 | * Hold an extra reference while retrying i/o. |
806 | */ | 778 | */ |
807 | iocb->ki_users++; /* grab extra reference */ | 779 | iocb->ki_users++; /* grab extra reference */ |
808 | lock_kiocb(iocb); | ||
809 | aio_run_iocb(iocb); | 780 | aio_run_iocb(iocb); |
810 | unlock_kiocb(iocb); | ||
811 | if (__aio_put_req(ctx, iocb)) /* drop extra ref */ | 781 | if (__aio_put_req(ctx, iocb)) /* drop extra ref */ |
812 | put_ioctx(ctx); | 782 | put_ioctx(ctx); |
813 | } | 783 | } |
@@ -898,16 +868,24 @@ static void aio_kick_handler(void *data) | |||
898 | * and if required activate the aio work queue to process | 868 | * and if required activate the aio work queue to process |
899 | * it | 869 | * it |
900 | */ | 870 | */ |
901 | static void queue_kicked_iocb(struct kiocb *iocb) | 871 | static void try_queue_kicked_iocb(struct kiocb *iocb) |
902 | { | 872 | { |
903 | struct kioctx *ctx = iocb->ki_ctx; | 873 | struct kioctx *ctx = iocb->ki_ctx; |
904 | unsigned long flags; | 874 | unsigned long flags; |
905 | int run = 0; | 875 | int run = 0; |
906 | 876 | ||
907 | WARN_ON((!list_empty(&iocb->ki_wait.task_list))); | 877 | /* We're supposed to be the only path putting the iocb back on the run |
878 | * list. If we find that the iocb is *back* on a wait queue already | ||
879 | * than retry has happened before we could queue the iocb. This also | ||
880 | * means that the retry could have completed and freed our iocb, no | ||
881 | * good. */ | ||
882 | BUG_ON((!list_empty(&iocb->ki_wait.task_list))); | ||
908 | 883 | ||
909 | spin_lock_irqsave(&ctx->ctx_lock, flags); | 884 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
910 | run = __queue_kicked_iocb(iocb); | 885 | /* set this inside the lock so that we can't race with aio_run_iocb() |
886 | * testing it and putting the iocb on the run list under the lock */ | ||
887 | if (!kiocbTryKick(iocb)) | ||
888 | run = __queue_kicked_iocb(iocb); | ||
911 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | 889 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); |
912 | if (run) | 890 | if (run) |
913 | aio_queue_work(ctx); | 891 | aio_queue_work(ctx); |
@@ -930,10 +908,7 @@ void fastcall kick_iocb(struct kiocb *iocb) | |||
930 | return; | 908 | return; |
931 | } | 909 | } |
932 | 910 | ||
933 | /* If its already kicked we shouldn't queue it again */ | 911 | try_queue_kicked_iocb(iocb); |
934 | if (!kiocbTryKick(iocb)) { | ||
935 | queue_kicked_iocb(iocb); | ||
936 | } | ||
937 | } | 912 | } |
938 | EXPORT_SYMBOL(kick_iocb); | 913 | EXPORT_SYMBOL(kick_iocb); |
939 | 914 | ||
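A stripped-down view of the queueing path after this change (names taken from the hunks above; a sketch, with only the locking shown):

	static void try_queue_kicked_iocb(struct kiocb *iocb)
	{
		struct kioctx *ctx = iocb->ki_ctx;
		unsigned long flags;
		int run = 0;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		/* test-and-set the kicked state under ctx_lock so a concurrent
		 * aio_run_iocb() cannot race between the test and the queueing */
		if (!kiocbTryKick(iocb))
			run = __queue_kicked_iocb(iocb);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
		if (run)
			aio_queue_work(ctx);
	}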
@@ -1321,8 +1296,11 @@ asmlinkage long sys_io_destroy(aio_context_t ctx) | |||
1321 | } | 1296 | } |
1322 | 1297 | ||
1323 | /* | 1298 | /* |
1324 | * Default retry method for aio_read (also used for first time submit) | 1299 | * aio_p{read,write} are the default ki_retry methods for |
1325 | * Responsible for updating iocb state as retries progress | 1300 | * IO_CMD_P{READ,WRITE}. They maintains kiocb retry state around potentially |
1301 | * multiple calls to f_op->aio_read(). They loop around partial progress | ||
1302 | * instead of returning -EIOCBRETRY because they don't have the means to call | ||
1303 | * kick_iocb(). | ||
1326 | */ | 1304 | */ |
1327 | static ssize_t aio_pread(struct kiocb *iocb) | 1305 | static ssize_t aio_pread(struct kiocb *iocb) |
1328 | { | 1306 | { |
@@ -1331,25 +1309,25 @@ static ssize_t aio_pread(struct kiocb *iocb) | |||
1331 | struct inode *inode = mapping->host; | 1309 | struct inode *inode = mapping->host; |
1332 | ssize_t ret = 0; | 1310 | ssize_t ret = 0; |
1333 | 1311 | ||
1334 | ret = file->f_op->aio_read(iocb, iocb->ki_buf, | 1312 | do { |
1335 | iocb->ki_left, iocb->ki_pos); | 1313 | ret = file->f_op->aio_read(iocb, iocb->ki_buf, |
1314 | iocb->ki_left, iocb->ki_pos); | ||
1315 | /* | ||
1316 | * Can't just depend on iocb->ki_left to determine | ||
1317 | * whether we are done. This may have been a short read. | ||
1318 | */ | ||
1319 | if (ret > 0) { | ||
1320 | iocb->ki_buf += ret; | ||
1321 | iocb->ki_left -= ret; | ||
1322 | } | ||
1336 | 1323 | ||
1337 | /* | ||
1338 | * Can't just depend on iocb->ki_left to determine | ||
1339 | * whether we are done. This may have been a short read. | ||
1340 | */ | ||
1341 | if (ret > 0) { | ||
1342 | iocb->ki_buf += ret; | ||
1343 | iocb->ki_left -= ret; | ||
1344 | /* | 1324 | /* |
1345 | * For pipes and sockets we return once we have | 1325 | * For pipes and sockets we return once we have some data; for |
1346 | * some data; for regular files we retry till we | 1326 | * regular files we retry till we complete the entire read or |
1347 | * complete the entire read or find that we can't | 1327 | * find that we can't read any more data (e.g short reads). |
1348 | * read any more data (e.g short reads). | ||
1349 | */ | 1328 | */ |
1350 | if (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)) | 1329 | } while (ret > 0 && iocb->ki_left > 0 && |
1351 | ret = -EIOCBRETRY; | 1330 | !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)); |
1352 | } | ||
1353 | 1331 | ||
1354 | /* This means we must have transferred all that we could */ | 1332 | /* This means we must have transferred all that we could */ |
1355 | /* No need to retry anymore */ | 1333 | /* No need to retry anymore */ |
@@ -1359,27 +1337,21 @@ static ssize_t aio_pread(struct kiocb *iocb) | |||
1359 | return ret; | 1337 | return ret; |
1360 | } | 1338 | } |
1361 | 1339 | ||
1362 | /* | 1340 | /* see aio_pread() */ |
1363 | * Default retry method for aio_write (also used for first time submit) | ||
1364 | * Responsible for updating iocb state as retries progress | ||
1365 | */ | ||
1366 | static ssize_t aio_pwrite(struct kiocb *iocb) | 1341 | static ssize_t aio_pwrite(struct kiocb *iocb) |
1367 | { | 1342 | { |
1368 | struct file *file = iocb->ki_filp; | 1343 | struct file *file = iocb->ki_filp; |
1369 | ssize_t ret = 0; | 1344 | ssize_t ret = 0; |
1370 | 1345 | ||
1371 | ret = file->f_op->aio_write(iocb, iocb->ki_buf, | 1346 | do { |
1372 | iocb->ki_left, iocb->ki_pos); | 1347 | ret = file->f_op->aio_write(iocb, iocb->ki_buf, |
1373 | 1348 | iocb->ki_left, iocb->ki_pos); | |
1374 | if (ret > 0) { | 1349 | if (ret > 0) { |
1375 | iocb->ki_buf += ret; | 1350 | iocb->ki_buf += ret; |
1376 | iocb->ki_left -= ret; | 1351 | iocb->ki_left -= ret; |
1377 | 1352 | } | |
1378 | ret = -EIOCBRETRY; | 1353 | } while (ret > 0 && iocb->ki_left > 0); |
1379 | } | ||
1380 | 1354 | ||
1381 | /* This means we must have transferred all that we could */ | ||
1382 | /* No need to retry anymore */ | ||
1383 | if ((ret == 0) || (iocb->ki_left == 0)) | 1355 | if ((ret == 0) || (iocb->ki_left == 0)) |
1384 | ret = iocb->ki_nbytes - iocb->ki_left; | 1356 | ret = iocb->ki_nbytes - iocb->ki_left; |
1385 | 1357 | ||
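As a simplified composite of the two hunks above (not the verbatim kernel code), the new control flow loops on partial progress instead of bouncing through -EIOCBRETRY:

	do {
		ret = file->f_op->aio_read(iocb, iocb->ki_buf,
					   iocb->ki_left, iocb->ki_pos);
		if (ret > 0) {
			iocb->ki_buf  += ret;	/* advance into the caller's buffer */
			iocb->ki_left -= ret;	/* bytes still outstanding */
		}
		/* pipes and sockets return as soon as some data is available;
		 * regular files keep going until done or a zero/short result */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode));

	if (ret == 0 || iocb->ki_left == 0)	/* transferred all we could */
		ret = iocb->ki_nbytes - iocb->ki_left;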
@@ -1549,7 +1521,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1549 | 1521 | ||
1550 | spin_lock_irq(&ctx->ctx_lock); | 1522 | spin_lock_irq(&ctx->ctx_lock); |
1551 | aio_run_iocb(req); | 1523 | aio_run_iocb(req); |
1552 | unlock_kiocb(req); | ||
1553 | if (!list_empty(&ctx->run_list)) { | 1524 | if (!list_empty(&ctx->run_list)) { |
1554 | /* drain the run list */ | 1525 | /* drain the run list */ |
1555 | while (__aio_run_iocbs(ctx)) | 1526 | while (__aio_run_iocbs(ctx)) |
@@ -1681,7 +1652,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, | |||
1681 | if (NULL != cancel) { | 1652 | if (NULL != cancel) { |
1682 | struct io_event tmp; | 1653 | struct io_event tmp; |
1683 | pr_debug("calling cancel\n"); | 1654 | pr_debug("calling cancel\n"); |
1684 | lock_kiocb(kiocb); | ||
1685 | memset(&tmp, 0, sizeof(tmp)); | 1655 | memset(&tmp, 0, sizeof(tmp)); |
1686 | tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user; | 1656 | tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user; |
1687 | tmp.data = kiocb->ki_user_data; | 1657 | tmp.data = kiocb->ki_user_data; |
@@ -1693,7 +1663,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, | |||
1693 | if (copy_to_user(result, &tmp, sizeof(tmp))) | 1663 | if (copy_to_user(result, &tmp, sizeof(tmp))) |
1694 | ret = -EFAULT; | 1664 | ret = -EFAULT; |
1695 | } | 1665 | } |
1696 | unlock_kiocb(kiocb); | ||
1697 | } else | 1666 | } else |
1698 | ret = -EINVAL; | 1667 | ret = -EINVAL; |
1699 | 1668 | ||
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index e240c335eb23..5af928fa0449 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c | |||
@@ -108,7 +108,7 @@ static int bfs_create(struct inode * dir, struct dentry * dentry, int mode, | |||
108 | inode->i_mapping->a_ops = &bfs_aops; | 108 | inode->i_mapping->a_ops = &bfs_aops; |
109 | inode->i_mode = mode; | 109 | inode->i_mode = mode; |
110 | inode->i_ino = ino; | 110 | inode->i_ino = ino; |
111 | BFS_I(inode)->i_dsk_ino = cpu_to_le16(ino); | 111 | BFS_I(inode)->i_dsk_ino = ino; |
112 | BFS_I(inode)->i_sblock = 0; | 112 | BFS_I(inode)->i_sblock = 0; |
113 | BFS_I(inode)->i_eblock = 0; | 113 | BFS_I(inode)->i_eblock = 0; |
114 | insert_inode_hash(inode); | 114 | insert_inode_hash(inode); |
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index c7b39aa279d7..3af6c73c5b5a 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c | |||
@@ -357,28 +357,46 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | |||
357 | } | 357 | } |
358 | 358 | ||
359 | info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */ | 359 | info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */ |
360 | info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - cpu_to_le32(bfs_sb->s_start))>>BFS_BSIZE_BITS; | 360 | info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - le32_to_cpu(bfs_sb->s_start))>>BFS_BSIZE_BITS; |
361 | info->si_freei = 0; | 361 | info->si_freei = 0; |
362 | info->si_lf_eblk = 0; | 362 | info->si_lf_eblk = 0; |
363 | info->si_lf_sblk = 0; | 363 | info->si_lf_sblk = 0; |
364 | info->si_lf_ioff = 0; | 364 | info->si_lf_ioff = 0; |
365 | bh = NULL; | ||
365 | for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) { | 366 | for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) { |
366 | inode = iget(s,i); | 367 | struct bfs_inode *di; |
367 | if (BFS_I(inode)->i_dsk_ino == 0) | 368 | int block = (i - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1; |
369 | int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK; | ||
370 | unsigned long sblock, eblock; | ||
371 | |||
372 | if (!off) { | ||
373 | brelse(bh); | ||
374 | bh = sb_bread(s, block); | ||
375 | } | ||
376 | |||
377 | if (!bh) | ||
378 | continue; | ||
379 | |||
380 | di = (struct bfs_inode *)bh->b_data + off; | ||
381 | |||
382 | if (!di->i_ino) { | ||
368 | info->si_freei++; | 383 | info->si_freei++; |
369 | else { | 384 | continue; |
370 | set_bit(i, info->si_imap); | 385 | } |
371 | info->si_freeb -= inode->i_blocks; | 386 | set_bit(i, info->si_imap); |
372 | if (BFS_I(inode)->i_eblock > info->si_lf_eblk) { | 387 | info->si_freeb -= BFS_FILEBLOCKS(di); |
373 | info->si_lf_eblk = BFS_I(inode)->i_eblock; | 388 | |
374 | info->si_lf_sblk = BFS_I(inode)->i_sblock; | 389 | sblock = le32_to_cpu(di->i_sblock); |
375 | info->si_lf_ioff = BFS_INO2OFF(i); | 390 | eblock = le32_to_cpu(di->i_eblock); |
376 | } | 391 | if (eblock > info->si_lf_eblk) { |
392 | info->si_lf_eblk = eblock; | ||
393 | info->si_lf_sblk = sblock; | ||
394 | info->si_lf_ioff = BFS_INO2OFF(i); | ||
377 | } | 395 | } |
378 | iput(inode); | ||
379 | } | 396 | } |
397 | brelse(bh); | ||
380 | if (!(s->s_flags & MS_RDONLY)) { | 398 | if (!(s->s_flags & MS_RDONLY)) { |
381 | mark_buffer_dirty(bh); | 399 | mark_buffer_dirty(info->si_sbh); |
382 | s->s_dirt = 1; | 400 | s->s_dirt = 1; |
383 | } | 401 | } |
384 | dump_imap("read_super", s); | 402 | dump_imap("read_super", s); |
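For orientation, the arithmetic the rewritten loop uses to walk the on-disk inode table directly (a sketch; constants as in the hunk above):

	/* inode i sits in block 1 + (i - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK,
	 * at slot (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK inside that block,
	 * so the buffer is only re-read when the slot index wraps back to 0 */
	int block = (i - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
	int off   = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
	struct bfs_inode *di = (struct bfs_inode *)bh->b_data + off;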
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7976a238f0a3..d4b15576e584 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -905,7 +905,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
905 | send_sig(SIGKILL, current, 0); | 905 | send_sig(SIGKILL, current, 0); |
906 | goto out_free_dentry; | 906 | goto out_free_dentry; |
907 | } | 907 | } |
908 | if (padzero(elf_bss)) { | 908 | if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { |
909 | send_sig(SIGSEGV, current, 0); | 909 | send_sig(SIGSEGV, current, 0); |
910 | retval = -EFAULT; /* Nobody gets to see this, but.. */ | 910 | retval = -EFAULT; /* Nobody gets to see this, but.. */ |
911 | goto out_free_dentry; | 911 | goto out_free_dentry; |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -75,7 +75,7 @@ struct bio_set { | |||
75 | */ | 75 | */ |
76 | static struct bio_set *fs_bio_set; | 76 | static struct bio_set *fs_bio_set; |
77 | 77 | ||
78 | static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) | 78 | static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) |
79 | { | 79 | { |
80 | struct bio_vec *bvl; | 80 | struct bio_vec *bvl; |
81 | struct biovec_slab *bp; | 81 | struct biovec_slab *bp; |
@@ -155,7 +155,7 @@ inline void bio_init(struct bio *bio) | |||
155 | * allocate bio and iovecs from the memory pools specified by the | 155 | * allocate bio and iovecs from the memory pools specified by the |
156 | * bio_set structure. | 156 | * bio_set structure. |
157 | **/ | 157 | **/ |
158 | struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs) | 158 | struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) |
159 | { | 159 | { |
160 | struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); | 160 | struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); |
161 | 161 | ||
@@ -181,7 +181,7 @@ out: | |||
181 | return bio; | 181 | return bio; |
182 | } | 182 | } |
183 | 183 | ||
184 | struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs) | 184 | struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) |
185 | { | 185 | { |
186 | struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); | 186 | struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); |
187 | 187 | ||
@@ -277,7 +277,7 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src) | |||
277 | * | 277 | * |
278 | * Like __bio_clone, only also allocates the returned bio | 278 | * Like __bio_clone, only also allocates the returned bio |
279 | */ | 279 | */ |
280 | struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask) | 280 | struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) |
281 | { | 281 | { |
282 | struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); | 282 | struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); |
283 | 283 | ||
@@ -1078,7 +1078,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) | |||
1078 | return bp; | 1078 | return bp; |
1079 | } | 1079 | } |
1080 | 1080 | ||
1081 | static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data) | 1081 | static void *bio_pair_alloc(gfp_t gfp_flags, void *data) |
1082 | { | 1082 | { |
1083 | return kmalloc(sizeof(struct bio_pair), gfp_flags); | 1083 | return kmalloc(sizeof(struct bio_pair), gfp_flags); |
1084 | } | 1084 | } |
diff --git a/fs/buffer.c b/fs/buffer.c index 6cbfceabd95d..1216c0d3c8ce 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -3045,7 +3045,7 @@ static void recalc_bh_state(void) | |||
3045 | buffer_heads_over_limit = (tot > max_buffer_heads); | 3045 | buffer_heads_over_limit = (tot > max_buffer_heads); |
3046 | } | 3046 | } |
3047 | 3047 | ||
3048 | struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags) | 3048 | struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) |
3049 | { | 3049 | { |
3050 | struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); | 3050 | struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); |
3051 | if (ret) { | 3051 | if (ret) { |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8cc23e7d0d5d..1ebf7dafc1d7 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -781,6 +781,8 @@ static int cifs_oplock_thread(void * dummyarg) | |||
781 | 781 | ||
782 | oplockThread = current; | 782 | oplockThread = current; |
783 | do { | 783 | do { |
784 | if (try_to_freeze()) | ||
785 | continue; | ||
784 | set_current_state(TASK_INTERRUPTIBLE); | 786 | set_current_state(TASK_INTERRUPTIBLE); |
785 | 787 | ||
786 | schedule_timeout(1*HZ); | 788 | schedule_timeout(1*HZ); |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 2335f14a1583..47360156cc54 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -344,6 +344,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
344 | } | 344 | } |
345 | 345 | ||
346 | while (server->tcpStatus != CifsExiting) { | 346 | while (server->tcpStatus != CifsExiting) { |
347 | if (try_to_freeze()) | ||
348 | continue; | ||
347 | if (bigbuf == NULL) { | 349 | if (bigbuf == NULL) { |
348 | bigbuf = cifs_buf_get(); | 350 | bigbuf = cifs_buf_get(); |
349 | if(bigbuf == NULL) { | 351 | if(bigbuf == NULL) { |
diff --git a/fs/compat.c b/fs/compat.c index ac3fb9ed8eea..a719e158e002 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -44,6 +44,8 @@ | |||
44 | #include <linux/nfsd/syscall.h> | 44 | #include <linux/nfsd/syscall.h> |
45 | #include <linux/personality.h> | 45 | #include <linux/personality.h> |
46 | #include <linux/rwsem.h> | 46 | #include <linux/rwsem.h> |
47 | #include <linux/acct.h> | ||
48 | #include <linux/mm.h> | ||
47 | 49 | ||
48 | #include <net/sock.h> /* siocdevprivate_ioctl */ | 50 | #include <net/sock.h> /* siocdevprivate_ioctl */ |
49 | 51 | ||
@@ -1487,6 +1489,8 @@ int compat_do_execve(char * filename, | |||
1487 | 1489 | ||
1488 | /* execve success */ | 1490 | /* execve success */ |
1489 | security_bprm_free(bprm); | 1491 | security_bprm_free(bprm); |
1492 | acct_update_integrals(current); | ||
1493 | update_mem_hiwater(current); | ||
1490 | kfree(bprm); | 1494 | kfree(bprm); |
1491 | return retval; | 1495 | return retval; |
1492 | } | 1496 | } |
diff --git a/fs/dcache.c b/fs/dcache.c index 7376b61269fb..fb10386c59be 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -102,7 +102,8 @@ static inline void dentry_iput(struct dentry * dentry) | |||
102 | list_del_init(&dentry->d_alias); | 102 | list_del_init(&dentry->d_alias); |
103 | spin_unlock(&dentry->d_lock); | 103 | spin_unlock(&dentry->d_lock); |
104 | spin_unlock(&dcache_lock); | 104 | spin_unlock(&dcache_lock); |
105 | fsnotify_inoderemove(inode); | 105 | if (!inode->i_nlink) |
106 | fsnotify_inoderemove(inode); | ||
106 | if (dentry->d_op && dentry->d_op->d_iput) | 107 | if (dentry->d_op && dentry->d_op->d_iput) |
107 | dentry->d_op->d_iput(dentry, inode); | 108 | dentry->d_op->d_iput(dentry, inode); |
108 | else | 109 | else |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 6ab1dd0ca904..4284cd31eba6 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -101,6 +101,10 @@ | |||
101 | /* Maximum number of poll wake up nests we are allowing */ | 101 | /* Maximum number of poll wake up nests we are allowing */ |
102 | #define EP_MAX_POLLWAKE_NESTS 4 | 102 | #define EP_MAX_POLLWAKE_NESTS 4 |
103 | 103 | ||
104 | /* Maximum msec timeout value storeable in a long int */ | ||
105 | #define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ) | ||
106 | |||
107 | |||
104 | struct epoll_filefd { | 108 | struct epoll_filefd { |
105 | struct file *file; | 109 | struct file *file; |
106 | int fd; | 110 | int fd; |
@@ -231,8 +235,9 @@ struct ep_pqueue { | |||
231 | 235 | ||
232 | static void ep_poll_safewake_init(struct poll_safewake *psw); | 236 | static void ep_poll_safewake_init(struct poll_safewake *psw); |
233 | static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq); | 237 | static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq); |
234 | static int ep_getfd(int *efd, struct inode **einode, struct file **efile); | 238 | static int ep_getfd(int *efd, struct inode **einode, struct file **efile, |
235 | static int ep_file_init(struct file *file); | 239 | struct eventpoll *ep); |
240 | static int ep_alloc(struct eventpoll **pep); | ||
236 | static void ep_free(struct eventpoll *ep); | 241 | static void ep_free(struct eventpoll *ep); |
237 | static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd); | 242 | static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd); |
238 | static void ep_use_epitem(struct epitem *epi); | 243 | static void ep_use_epitem(struct epitem *epi); |
@@ -501,38 +506,37 @@ void eventpoll_release_file(struct file *file) | |||
501 | asmlinkage long sys_epoll_create(int size) | 506 | asmlinkage long sys_epoll_create(int size) |
502 | { | 507 | { |
503 | int error, fd; | 508 | int error, fd; |
509 | struct eventpoll *ep; | ||
504 | struct inode *inode; | 510 | struct inode *inode; |
505 | struct file *file; | 511 | struct file *file; |
506 | 512 | ||
507 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n", | 513 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n", |
508 | current, size)); | 514 | current, size)); |
509 | 515 | ||
510 | /* Sanity check on the size parameter */ | 516 | /* |
517 | * Sanity check on the size parameter, and create the internal data | ||
518 | * structure ( "struct eventpoll" ). | ||
519 | */ | ||
511 | error = -EINVAL; | 520 | error = -EINVAL; |
512 | if (size <= 0) | 521 | if (size <= 0 || (error = ep_alloc(&ep)) != 0) |
513 | goto eexit_1; | 522 | goto eexit_1; |
514 | 523 | ||
515 | /* | 524 | /* |
516 | * Creates all the items needed to setup an eventpoll file. That is, | 525 | * Creates all the items needed to setup an eventpoll file. That is, |
517 | * a file structure, and inode and a free file descriptor. | 526 | * a file structure, and inode and a free file descriptor. |
518 | */ | 527 | */ |
519 | error = ep_getfd(&fd, &inode, &file); | 528 | error = ep_getfd(&fd, &inode, &file, ep); |
520 | if (error) | ||
521 | goto eexit_1; | ||
522 | |||
523 | /* Setup the file internal data structure ( "struct eventpoll" ) */ | ||
524 | error = ep_file_init(file); | ||
525 | if (error) | 529 | if (error) |
526 | goto eexit_2; | 530 | goto eexit_2; |
527 | 531 | ||
528 | |||
529 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", | 532 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", |
530 | current, size, fd)); | 533 | current, size, fd)); |
531 | 534 | ||
532 | return fd; | 535 | return fd; |
533 | 536 | ||
534 | eexit_2: | 537 | eexit_2: |
535 | sys_close(fd); | 538 | ep_free(ep); |
539 | kfree(ep); | ||
536 | eexit_1: | 540 | eexit_1: |
537 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", | 541 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n", |
538 | current, size, error)); | 542 | current, size, error)); |
@@ -706,7 +710,8 @@ eexit_1: | |||
706 | /* | 710 | /* |
707 | * Creates the file descriptor to be used by the epoll interface. | 711 | * Creates the file descriptor to be used by the epoll interface. |
708 | */ | 712 | */ |
709 | static int ep_getfd(int *efd, struct inode **einode, struct file **efile) | 713 | static int ep_getfd(int *efd, struct inode **einode, struct file **efile, |
714 | struct eventpoll *ep) | ||
710 | { | 715 | { |
711 | struct qstr this; | 716 | struct qstr this; |
712 | char name[32]; | 717 | char name[32]; |
@@ -756,7 +761,7 @@ static int ep_getfd(int *efd, struct inode **einode, struct file **efile) | |||
756 | file->f_op = &eventpoll_fops; | 761 | file->f_op = &eventpoll_fops; |
757 | file->f_mode = FMODE_READ; | 762 | file->f_mode = FMODE_READ; |
758 | file->f_version = 0; | 763 | file->f_version = 0; |
759 | file->private_data = NULL; | 764 | file->private_data = ep; |
760 | 765 | ||
761 | /* Install the new setup file into the allocated fd. */ | 766 | /* Install the new setup file into the allocated fd. */ |
762 | fd_install(fd, file); | 767 | fd_install(fd, file); |
@@ -777,14 +782,13 @@ eexit_1: | |||
777 | } | 782 | } |
778 | 783 | ||
779 | 784 | ||
780 | static int ep_file_init(struct file *file) | 785 | static int ep_alloc(struct eventpoll **pep) |
781 | { | 786 | { |
782 | struct eventpoll *ep; | 787 | struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL); |
783 | 788 | ||
784 | if (!(ep = kmalloc(sizeof(struct eventpoll), GFP_KERNEL))) | 789 | if (!ep) |
785 | return -ENOMEM; | 790 | return -ENOMEM; |
786 | 791 | ||
787 | memset(ep, 0, sizeof(*ep)); | ||
788 | rwlock_init(&ep->lock); | 792 | rwlock_init(&ep->lock); |
789 | init_rwsem(&ep->sem); | 793 | init_rwsem(&ep->sem); |
790 | init_waitqueue_head(&ep->wq); | 794 | init_waitqueue_head(&ep->wq); |
@@ -792,9 +796,9 @@ static int ep_file_init(struct file *file) | |||
792 | INIT_LIST_HEAD(&ep->rdllist); | 796 | INIT_LIST_HEAD(&ep->rdllist); |
793 | ep->rbr = RB_ROOT; | 797 | ep->rbr = RB_ROOT; |
794 | 798 | ||
795 | file->private_data = ep; | 799 | *pep = ep; |
796 | 800 | ||
797 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_file_init() ep=%p\n", | 801 | DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n", |
798 | current, ep)); | 802 | current, ep)); |
799 | return 0; | 803 | return 0; |
800 | } | 804 | } |
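The allocation in ep_alloc() above is the usual kmalloc()+memset() to kzalloc() conversion; in isolation:

	/* kzalloc() hands back zero-filled memory, so the explicit memset() goes away */
	struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;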
@@ -1506,8 +1510,8 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, | |||
1506 | * and the overflow condition. The passed timeout is in milliseconds, | 1510 | * and the overflow condition. The passed timeout is in milliseconds, |
1507 | * that why (t * HZ) / 1000. | 1511 | * that why (t * HZ) / 1000. |
1508 | */ | 1512 | */ |
1509 | jtimeout = timeout == -1 || timeout > (MAX_SCHEDULE_TIMEOUT - 1000) / HZ ? | 1513 | jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ? |
1510 | MAX_SCHEDULE_TIMEOUT: (timeout * HZ + 999) / 1000; | 1514 | MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000; |
1511 | 1515 | ||
1512 | retry: | 1516 | retry: |
1513 | write_lock_irqsave(&ep->lock, flags); | 1517 | write_lock_irqsave(&ep->lock, flags); |
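A worked reading of the timeout conversion above (sketch; the HZ value is only for illustration): a timeout of t ms becomes (t * HZ + 999) / 1000 jiffies, rounded up, and EP_MAX_MSTIMEO caps t so that t * HZ cannot overflow a long.

	/* e.g. with HZ = 1000: 2500 ms -> (2500 * 1000 + 999) / 1000 = 2500 jiffies;
	 * a negative or over-large timeout collapses to MAX_SCHEDULE_TIMEOUT */
	jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;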
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index c8d07030c897..e2d6208633a7 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c | |||
@@ -605,27 +605,28 @@ got: | |||
605 | insert_inode_hash(inode); | 605 | insert_inode_hash(inode); |
606 | 606 | ||
607 | if (DQUOT_ALLOC_INODE(inode)) { | 607 | if (DQUOT_ALLOC_INODE(inode)) { |
608 | DQUOT_DROP(inode); | ||
609 | err = -ENOSPC; | 608 | err = -ENOSPC; |
610 | goto fail2; | 609 | goto fail_drop; |
611 | } | 610 | } |
611 | |||
612 | err = ext2_init_acl(inode, dir); | 612 | err = ext2_init_acl(inode, dir); |
613 | if (err) { | 613 | if (err) |
614 | DQUOT_FREE_INODE(inode); | 614 | goto fail_free_drop; |
615 | DQUOT_DROP(inode); | 615 | |
616 | goto fail2; | ||
617 | } | ||
618 | err = ext2_init_security(inode,dir); | 616 | err = ext2_init_security(inode,dir); |
619 | if (err) { | 617 | if (err) |
620 | DQUOT_FREE_INODE(inode); | 618 | goto fail_free_drop; |
621 | goto fail2; | 619 | |
622 | } | ||
623 | mark_inode_dirty(inode); | 620 | mark_inode_dirty(inode); |
624 | ext2_debug("allocating inode %lu\n", inode->i_ino); | 621 | ext2_debug("allocating inode %lu\n", inode->i_ino); |
625 | ext2_preread_inode(inode); | 622 | ext2_preread_inode(inode); |
626 | return inode; | 623 | return inode; |
627 | 624 | ||
628 | fail2: | 625 | fail_free_drop: |
626 | DQUOT_FREE_INODE(inode); | ||
627 | |||
628 | fail_drop: | ||
629 | DQUOT_DROP(inode); | ||
629 | inode->i_flags |= S_NOQUOTA; | 630 | inode->i_flags |= S_NOQUOTA; |
630 | inode->i_nlink = 0; | 631 | inode->i_nlink = 0; |
631 | iput(inode); | 632 | iput(inode); |
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index e463dca008e4..0213db4911a2 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c | |||
@@ -1410,7 +1410,7 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) | |||
1410 | unsigned long desc_count; | 1410 | unsigned long desc_count; |
1411 | struct ext3_group_desc *gdp; | 1411 | struct ext3_group_desc *gdp; |
1412 | int i; | 1412 | int i; |
1413 | unsigned long ngroups; | 1413 | unsigned long ngroups = EXT3_SB(sb)->s_groups_count; |
1414 | #ifdef EXT3FS_DEBUG | 1414 | #ifdef EXT3FS_DEBUG |
1415 | struct ext3_super_block *es; | 1415 | struct ext3_super_block *es; |
1416 | unsigned long bitmap_count, x; | 1416 | unsigned long bitmap_count, x; |
@@ -1421,7 +1421,8 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) | |||
1421 | desc_count = 0; | 1421 | desc_count = 0; |
1422 | bitmap_count = 0; | 1422 | bitmap_count = 0; |
1423 | gdp = NULL; | 1423 | gdp = NULL; |
1424 | for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { | 1424 | |
1425 | for (i = 0; i < ngroups; i++) { | ||
1425 | gdp = ext3_get_group_desc(sb, i, NULL); | 1426 | gdp = ext3_get_group_desc(sb, i, NULL); |
1426 | if (!gdp) | 1427 | if (!gdp) |
1427 | continue; | 1428 | continue; |
@@ -1443,7 +1444,6 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) | |||
1443 | return bitmap_count; | 1444 | return bitmap_count; |
1444 | #else | 1445 | #else |
1445 | desc_count = 0; | 1446 | desc_count = 0; |
1446 | ngroups = EXT3_SB(sb)->s_groups_count; | ||
1447 | smp_rmb(); | 1447 | smp_rmb(); |
1448 | for (i = 0; i < ngroups; i++) { | 1448 | for (i = 0; i < ngroups; i++) { |
1449 | gdp = ext3_get_group_desc(sb, i, NULL); | 1449 | gdp = ext3_get_group_desc(sb, i, NULL); |
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index 96552769d039..6549945f9ac1 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c | |||
@@ -597,27 +597,22 @@ got: | |||
597 | 597 | ||
598 | ret = inode; | 598 | ret = inode; |
599 | if(DQUOT_ALLOC_INODE(inode)) { | 599 | if(DQUOT_ALLOC_INODE(inode)) { |
600 | DQUOT_DROP(inode); | ||
601 | err = -EDQUOT; | 600 | err = -EDQUOT; |
602 | goto fail2; | 601 | goto fail_drop; |
603 | } | 602 | } |
603 | |||
604 | err = ext3_init_acl(handle, inode, dir); | 604 | err = ext3_init_acl(handle, inode, dir); |
605 | if (err) { | 605 | if (err) |
606 | DQUOT_FREE_INODE(inode); | 606 | goto fail_free_drop; |
607 | DQUOT_DROP(inode); | 607 | |
608 | goto fail2; | ||
609 | } | ||
610 | err = ext3_init_security(handle,inode, dir); | 608 | err = ext3_init_security(handle,inode, dir); |
611 | if (err) { | 609 | if (err) |
612 | DQUOT_FREE_INODE(inode); | 610 | goto fail_free_drop; |
613 | goto fail2; | 611 | |
614 | } | ||
615 | err = ext3_mark_inode_dirty(handle, inode); | 612 | err = ext3_mark_inode_dirty(handle, inode); |
616 | if (err) { | 613 | if (err) { |
617 | ext3_std_error(sb, err); | 614 | ext3_std_error(sb, err); |
618 | DQUOT_FREE_INODE(inode); | 615 | goto fail_free_drop; |
619 | DQUOT_DROP(inode); | ||
620 | goto fail2; | ||
621 | } | 616 | } |
622 | 617 | ||
623 | ext3_debug("allocating inode %lu\n", inode->i_ino); | 618 | ext3_debug("allocating inode %lu\n", inode->i_ino); |
@@ -631,7 +626,11 @@ really_out: | |||
631 | brelse(bitmap_bh); | 626 | brelse(bitmap_bh); |
632 | return ret; | 627 | return ret; |
633 | 628 | ||
634 | fail2: | 629 | fail_free_drop: |
630 | DQUOT_FREE_INODE(inode); | ||
631 | |||
632 | fail_drop: | ||
633 | DQUOT_DROP(inode); | ||
635 | inode->i_flags |= S_NOQUOTA; | 634 | inode->i_flags |= S_NOQUOTA; |
636 | inode->i_nlink = 0; | 635 | inode->i_nlink = 0; |
637 | iput(inode); | 636 | iput(inode); |
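The consolidated failure path above (shared in spirit by the ext2 and ext3 ialloc hunks) funnels every error through one ladder; a sketch showing only the cleanup ordering it enforces:

	fail_free_drop:
		DQUOT_FREE_INODE(inode);	/* give back the quota charge first... */
	fail_drop:
		DQUOT_DROP(inode);		/* ...then detach the quota pointers */
		inode->i_flags |= S_NOQUOTA;	/* later teardown skips quota entirely */
		inode->i_nlink = 0;
		iput(inode);
		/* then release any held buffers/handle and return ERR_PTR(err) */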
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index 2c9f81278d5d..57f79106267d 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c | |||
@@ -242,7 +242,7 @@ static int setup_new_group_blocks(struct super_block *sb, | |||
242 | i < sbi->s_itb_per_group; i++, bit++, block++) { | 242 | i < sbi->s_itb_per_group; i++, bit++, block++) { |
243 | struct buffer_head *it; | 243 | struct buffer_head *it; |
244 | 244 | ||
245 | ext3_debug("clear inode block %#04x (+%ld)\n", block, bit); | 245 | ext3_debug("clear inode block %#04lx (+%d)\n", block, bit); |
246 | if (IS_ERR(it = bclean(handle, sb, block))) { | 246 | if (IS_ERR(it = bclean(handle, sb, block))) { |
247 | err = PTR_ERR(it); | 247 | err = PTR_ERR(it); |
248 | goto exit_bh; | 248 | goto exit_bh; |
@@ -643,8 +643,8 @@ static void update_backups(struct super_block *sb, | |||
643 | break; | 643 | break; |
644 | 644 | ||
645 | bh = sb_getblk(sb, group * bpg + blk_off); | 645 | bh = sb_getblk(sb, group * bpg + blk_off); |
646 | ext3_debug(sb, __FUNCTION__, "update metadata backup %#04lx\n", | 646 | ext3_debug("update metadata backup %#04lx\n", |
647 | bh->b_blocknr); | 647 | (unsigned long)bh->b_blocknr); |
648 | if ((err = ext3_journal_get_write_access(handle, bh))) | 648 | if ((err = ext3_journal_get_write_access(handle, bh))) |
649 | break; | 649 | break; |
650 | lock_buffer(bh); | 650 | lock_buffer(bh); |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index a93c3609025d..9e24ceb019fe 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -512,15 +512,14 @@ static void ext3_clear_inode(struct inode *inode) | |||
512 | 512 | ||
513 | static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs) | 513 | static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs) |
514 | { | 514 | { |
515 | struct ext3_sb_info *sbi = EXT3_SB(vfs->mnt_sb); | 515 | struct super_block *sb = vfs->mnt_sb; |
516 | struct ext3_sb_info *sbi = EXT3_SB(sb); | ||
516 | 517 | ||
517 | if (sbi->s_mount_opt & EXT3_MOUNT_JOURNAL_DATA) | 518 | if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA) |
518 | seq_puts(seq, ",data=journal"); | 519 | seq_puts(seq, ",data=journal"); |
519 | 520 | else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA) | |
520 | if (sbi->s_mount_opt & EXT3_MOUNT_ORDERED_DATA) | ||
521 | seq_puts(seq, ",data=ordered"); | 521 | seq_puts(seq, ",data=ordered"); |
522 | 522 | else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA) | |
523 | if (sbi->s_mount_opt & EXT3_MOUNT_WRITEBACK_DATA) | ||
524 | seq_puts(seq, ",data=writeback"); | 523 | seq_puts(seq, ",data=writeback"); |
525 | 524 | ||
526 | #if defined(CONFIG_QUOTA) | 525 | #if defined(CONFIG_QUOTA) |
diff --git a/fs/fat/file.c b/fs/fat/file.c index 62ffa9139400..7134403d5be2 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c | |||
@@ -12,39 +12,6 @@ | |||
12 | #include <linux/smp_lock.h> | 12 | #include <linux/smp_lock.h> |
13 | #include <linux/buffer_head.h> | 13 | #include <linux/buffer_head.h> |
14 | 14 | ||
15 | static ssize_t fat_file_aio_write(struct kiocb *iocb, const char __user *buf, | ||
16 | size_t count, loff_t pos) | ||
17 | { | ||
18 | struct inode *inode = iocb->ki_filp->f_dentry->d_inode; | ||
19 | int retval; | ||
20 | |||
21 | retval = generic_file_aio_write(iocb, buf, count, pos); | ||
22 | if (retval > 0) { | ||
23 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | ||
24 | MSDOS_I(inode)->i_attrs |= ATTR_ARCH; | ||
25 | mark_inode_dirty(inode); | ||
26 | // check the locking rules | ||
27 | // if (IS_SYNC(inode)) | ||
28 | // fat_sync_inode(inode); | ||
29 | } | ||
30 | return retval; | ||
31 | } | ||
32 | |||
33 | static ssize_t fat_file_writev(struct file *filp, const struct iovec *iov, | ||
34 | unsigned long nr_segs, loff_t *ppos) | ||
35 | { | ||
36 | struct inode *inode = filp->f_dentry->d_inode; | ||
37 | int retval; | ||
38 | |||
39 | retval = generic_file_writev(filp, iov, nr_segs, ppos); | ||
40 | if (retval > 0) { | ||
41 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | ||
42 | MSDOS_I(inode)->i_attrs |= ATTR_ARCH; | ||
43 | mark_inode_dirty(inode); | ||
44 | } | ||
45 | return retval; | ||
46 | } | ||
47 | |||
48 | int fat_generic_ioctl(struct inode *inode, struct file *filp, | 15 | int fat_generic_ioctl(struct inode *inode, struct file *filp, |
49 | unsigned int cmd, unsigned long arg) | 16 | unsigned int cmd, unsigned long arg) |
50 | { | 17 | { |
@@ -148,9 +115,9 @@ struct file_operations fat_file_operations = { | |||
148 | .read = do_sync_read, | 115 | .read = do_sync_read, |
149 | .write = do_sync_write, | 116 | .write = do_sync_write, |
150 | .readv = generic_file_readv, | 117 | .readv = generic_file_readv, |
151 | .writev = fat_file_writev, | 118 | .writev = generic_file_writev, |
152 | .aio_read = generic_file_aio_read, | 119 | .aio_read = generic_file_aio_read, |
153 | .aio_write = fat_file_aio_write, | 120 | .aio_write = generic_file_aio_write, |
154 | .mmap = generic_file_mmap, | 121 | .mmap = generic_file_mmap, |
155 | .ioctl = fat_generic_ioctl, | 122 | .ioctl = fat_generic_ioctl, |
156 | .fsync = file_fsync, | 123 | .fsync = file_fsync, |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index a7cbe68e2259..e2effe2dc9b2 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -102,6 +102,19 @@ static int fat_prepare_write(struct file *file, struct page *page, | |||
102 | &MSDOS_I(page->mapping->host)->mmu_private); | 102 | &MSDOS_I(page->mapping->host)->mmu_private); |
103 | } | 103 | } |
104 | 104 | ||
105 | static int fat_commit_write(struct file *file, struct page *page, | ||
106 | unsigned from, unsigned to) | ||
107 | { | ||
108 | struct inode *inode = page->mapping->host; | ||
109 | int err = generic_commit_write(file, page, from, to); | ||
110 | if (!err && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) { | ||
111 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | ||
112 | MSDOS_I(inode)->i_attrs |= ATTR_ARCH; | ||
113 | mark_inode_dirty(inode); | ||
114 | } | ||
115 | return err; | ||
116 | } | ||
117 | |||
105 | static sector_t _fat_bmap(struct address_space *mapping, sector_t block) | 118 | static sector_t _fat_bmap(struct address_space *mapping, sector_t block) |
106 | { | 119 | { |
107 | return generic_block_bmap(mapping, block, fat_get_block); | 120 | return generic_block_bmap(mapping, block, fat_get_block); |
@@ -112,7 +125,7 @@ static struct address_space_operations fat_aops = { | |||
112 | .writepage = fat_writepage, | 125 | .writepage = fat_writepage, |
113 | .sync_page = block_sync_page, | 126 | .sync_page = block_sync_page, |
114 | .prepare_write = fat_prepare_write, | 127 | .prepare_write = fat_prepare_write, |
115 | .commit_write = generic_commit_write, | 128 | .commit_write = fat_commit_write, |
116 | .bmap = _fat_bmap | 129 | .bmap = _fat_bmap |
117 | }; | 130 | }; |
118 | 131 | ||
@@ -287,9 +300,9 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de) | |||
287 | inode->i_blksize = sbi->cluster_size; | 300 | inode->i_blksize = sbi->cluster_size; |
288 | inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) | 301 | inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1)) |
289 | & ~((loff_t)sbi->cluster_size - 1)) >> 9; | 302 | & ~((loff_t)sbi->cluster_size - 1)) >> 9; |
290 | inode->i_mtime.tv_sec = inode->i_atime.tv_sec = | 303 | inode->i_mtime.tv_sec = |
291 | date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date)); | 304 | date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date)); |
292 | inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = 0; | 305 | inode->i_mtime.tv_nsec = 0; |
293 | if (sbi->options.isvfat) { | 306 | if (sbi->options.isvfat) { |
294 | int secs = de->ctime_cs / 100; | 307 | int secs = de->ctime_cs / 100; |
295 | int csecs = de->ctime_cs % 100; | 308 | int csecs = de->ctime_cs % 100; |
@@ -297,8 +310,11 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de) | |||
297 | date_dos2unix(le16_to_cpu(de->ctime), | 310 | date_dos2unix(le16_to_cpu(de->ctime), |
298 | le16_to_cpu(de->cdate)) + secs; | 311 | le16_to_cpu(de->cdate)) + secs; |
299 | inode->i_ctime.tv_nsec = csecs * 10000000; | 312 | inode->i_ctime.tv_nsec = csecs * 10000000; |
313 | inode->i_atime.tv_sec = | ||
314 | date_dos2unix(le16_to_cpu(0), le16_to_cpu(de->adate)); | ||
315 | inode->i_atime.tv_nsec = 0; | ||
300 | } else | 316 | } else |
301 | inode->i_ctime = inode->i_mtime; | 317 | inode->i_ctime = inode->i_atime = inode->i_mtime; |
302 | 318 | ||
303 | return 0; | 319 | return 0; |
304 | } | 320 | } |
@@ -500,7 +516,9 @@ retry: | |||
500 | raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16); | 516 | raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16); |
501 | fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time, &raw_entry->date); | 517 | fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time, &raw_entry->date); |
502 | if (sbi->options.isvfat) { | 518 | if (sbi->options.isvfat) { |
519 | __le16 atime; | ||
503 | fat_date_unix2dos(inode->i_ctime.tv_sec,&raw_entry->ctime,&raw_entry->cdate); | 520 | fat_date_unix2dos(inode->i_ctime.tv_sec,&raw_entry->ctime,&raw_entry->cdate); |
521 | fat_date_unix2dos(inode->i_atime.tv_sec,&atime,&raw_entry->adate); | ||
504 | raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 + | 522 | raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 + |
505 | inode->i_ctime.tv_nsec / 10000000; | 523 | inode->i_ctime.tv_nsec / 10000000; |
506 | } | 524 | } |
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index e79e49b3eec7..29f1e9f6e85c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -96,6 +96,8 @@ static int fuse_lookup_iget(struct inode *dir, struct dentry *entry, | |||
96 | fuse_lookup_init(req, dir, entry, &outarg); | 96 | fuse_lookup_init(req, dir, entry, &outarg); |
97 | request_send(fc, req); | 97 | request_send(fc, req); |
98 | err = req->out.h.error; | 98 | err = req->out.h.error; |
99 | if (!err && (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID)) | ||
100 | err = -EIO; | ||
99 | if (!err) { | 101 | if (!err) { |
100 | inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, | 102 | inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, |
101 | &outarg.attr); | 103 | &outarg.attr); |
@@ -152,6 +154,10 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, | |||
152 | fuse_put_request(fc, req); | 154 | fuse_put_request(fc, req); |
153 | return err; | 155 | return err; |
154 | } | 156 | } |
157 | if (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID) { | ||
158 | fuse_put_request(fc, req); | ||
159 | return -EIO; | ||
160 | } | ||
155 | inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, | 161 | inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, |
156 | &outarg.attr); | 162 | &outarg.attr); |
157 | if (!inode) { | 163 | if (!inode) { |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 6454022b0536..657ab11c173b 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -23,6 +23,10 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir) | |||
23 | struct fuse_file *ff; | 23 | struct fuse_file *ff; |
24 | int err; | 24 | int err; |
25 | 25 | ||
26 | /* VFS checks this, but only _after_ ->open() */ | ||
27 | if (file->f_flags & O_DIRECT) | ||
28 | return -EINVAL; | ||
29 | |||
26 | err = generic_file_open(inode, file); | 30 | err = generic_file_open(inode, file); |
27 | if (err) | 31 | if (err) |
28 | return err; | 32 | return err; |
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 59c5062cd63f..dd7113106269 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
@@ -793,11 +793,6 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from, | |||
793 | return(err); | 793 | return(err); |
794 | } | 794 | } |
795 | 795 | ||
796 | void hostfs_truncate(struct inode *ino) | ||
797 | { | ||
798 | not_implemented(); | ||
799 | } | ||
800 | |||
801 | int hostfs_permission(struct inode *ino, int desired, struct nameidata *nd) | 796 | int hostfs_permission(struct inode *ino, int desired, struct nameidata *nd) |
802 | { | 797 | { |
803 | char *name; | 798 | char *name; |
@@ -894,7 +889,6 @@ static struct inode_operations hostfs_iops = { | |||
894 | .rmdir = hostfs_rmdir, | 889 | .rmdir = hostfs_rmdir, |
895 | .mknod = hostfs_mknod, | 890 | .mknod = hostfs_mknod, |
896 | .rename = hostfs_rename, | 891 | .rename = hostfs_rename, |
897 | .truncate = hostfs_truncate, | ||
898 | .permission = hostfs_permission, | 892 | .permission = hostfs_permission, |
899 | .setattr = hostfs_setattr, | 893 | .setattr = hostfs_setattr, |
900 | .getattr = hostfs_getattr, | 894 | .getattr = hostfs_getattr, |
@@ -910,7 +904,6 @@ static struct inode_operations hostfs_dir_iops = { | |||
910 | .rmdir = hostfs_rmdir, | 904 | .rmdir = hostfs_rmdir, |
911 | .mknod = hostfs_mknod, | 905 | .mknod = hostfs_mknod, |
912 | .rename = hostfs_rename, | 906 | .rename = hostfs_rename, |
913 | .truncate = hostfs_truncate, | ||
914 | .permission = hostfs_permission, | 907 | .permission = hostfs_permission, |
915 | .setattr = hostfs_setattr, | 908 | .setattr = hostfs_setattr, |
916 | .getattr = hostfs_getattr, | 909 | .getattr = hostfs_getattr, |
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 0ec62d5310db..9f942ca8e4e3 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
@@ -129,8 +129,7 @@ void jfs_delete_inode(struct inode *inode) | |||
129 | jfs_info("In jfs_delete_inode, inode = 0x%p", inode); | 129 | jfs_info("In jfs_delete_inode, inode = 0x%p", inode); |
130 | 130 | ||
131 | if (!is_bad_inode(inode) && | 131 | if (!is_bad_inode(inode) && |
132 | (JFS_IP(inode)->fileset == cpu_to_le32(FILESYSTEM_I))) { | 132 | (JFS_IP(inode)->fileset == FILESYSTEM_I)) { |
133 | |||
134 | truncate_inode_pages(&inode->i_data, 0); | 133 | truncate_inode_pages(&inode->i_data, 0); |
135 | 134 | ||
136 | if (test_cflag(COMMIT_Freewmap, inode)) | 135 | if (test_cflag(COMMIT_Freewmap, inode)) |
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index c739626f5bf1..eadf319bee22 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c | |||
@@ -3055,7 +3055,7 @@ static int cntlz(u32 value) | |||
3055 | * RETURN VALUES: | 3055 | * RETURN VALUES: |
3056 | * log2 number of blocks | 3056 | * log2 number of blocks |
3057 | */ | 3057 | */ |
3058 | int blkstol2(s64 nb) | 3058 | static int blkstol2(s64 nb) |
3059 | { | 3059 | { |
3060 | int l2nb; | 3060 | int l2nb; |
3061 | s64 mask; /* meant to be signed */ | 3061 | s64 mask; /* meant to be signed */ |
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index c7a92f9deb2b..9b71ed2674fe 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c | |||
@@ -725,6 +725,9 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp, | |||
725 | else | 725 | else |
726 | tlck->flag = tlckINODELOCK; | 726 | tlck->flag = tlckINODELOCK; |
727 | 727 | ||
728 | if (S_ISDIR(ip->i_mode)) | ||
729 | tlck->flag |= tlckDIRECTORY; | ||
730 | |||
728 | tlck->type = 0; | 731 | tlck->type = 0; |
729 | 732 | ||
730 | /* bind the tlock and the page */ | 733 | /* bind the tlock and the page */ |
@@ -1009,6 +1012,8 @@ struct tlock *txMaplock(tid_t tid, struct inode *ip, int type) | |||
1009 | 1012 | ||
1010 | /* bind the tlock and the object */ | 1013 | /* bind the tlock and the object */ |
1011 | tlck->flag = tlckINODELOCK; | 1014 | tlck->flag = tlckINODELOCK; |
1015 | if (S_ISDIR(ip->i_mode)) | ||
1016 | tlck->flag |= tlckDIRECTORY; | ||
1012 | tlck->ip = ip; | 1017 | tlck->ip = ip; |
1013 | tlck->mp = NULL; | 1018 | tlck->mp = NULL; |
1014 | 1019 | ||
@@ -1077,6 +1082,8 @@ struct linelock *txLinelock(struct linelock * tlock) | |||
1077 | linelock->flag = tlckLINELOCK; | 1082 | linelock->flag = tlckLINELOCK; |
1078 | linelock->maxcnt = TLOCKLONG; | 1083 | linelock->maxcnt = TLOCKLONG; |
1079 | linelock->index = 0; | 1084 | linelock->index = 0; |
1085 | if (tlck->flag & tlckDIRECTORY) | ||
1086 | linelock->flag |= tlckDIRECTORY; | ||
1080 | 1087 | ||
1081 | /* append linelock after tlock */ | 1088 | /* append linelock after tlock */ |
1082 | linelock->next = tlock->next; | 1089 | linelock->next = tlock->next; |
@@ -2070,8 +2077,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, | |||
2070 | * | 2077 | * |
2071 | * function: log from maplock of freed data extents; | 2078 | * function: log from maplock of freed data extents; |
2072 | */ | 2079 | */ |
2073 | void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, | 2080 | static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, |
2074 | struct tlock * tlck) | 2081 | struct tlock * tlck) |
2075 | { | 2082 | { |
2076 | struct pxd_lock *pxdlock; | 2083 | struct pxd_lock *pxdlock; |
2077 | int i, nlock; | 2084 | int i, nlock; |
@@ -2209,7 +2216,7 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea) | |||
2209 | * function: synchronously write pages locked by transaction | 2216 | * function: synchronously write pages locked by transaction |
2210 | * after txLog() but before txUpdateMap(); | 2217 | * after txLog() but before txUpdateMap(); |
2211 | */ | 2218 | */ |
2212 | void txForce(struct tblock * tblk) | 2219 | static void txForce(struct tblock * tblk) |
2213 | { | 2220 | { |
2214 | struct tlock *tlck; | 2221 | struct tlock *tlck; |
2215 | lid_t lid, next; | 2222 | lid_t lid, next; |
@@ -2358,7 +2365,7 @@ static void txUpdateMap(struct tblock * tblk) | |||
2358 | */ | 2365 | */ |
2359 | else { /* (maplock->flag & mlckFREE) */ | 2366 | else { /* (maplock->flag & mlckFREE) */ |
2360 | 2367 | ||
2361 | if (S_ISDIR(tlck->ip->i_mode)) | 2368 | if (tlck->flag & tlckDIRECTORY) |
2362 | txFreeMap(ipimap, maplock, | 2369 | txFreeMap(ipimap, maplock, |
2363 | tblk, COMMIT_PWMAP); | 2370 | tblk, COMMIT_PWMAP); |
2364 | else | 2371 | else |
diff --git a/fs/jfs/jfs_txnmgr.h b/fs/jfs/jfs_txnmgr.h index 59ad0f6b7231..0e4dc4514c47 100644 --- a/fs/jfs/jfs_txnmgr.h +++ b/fs/jfs/jfs_txnmgr.h | |||
@@ -122,6 +122,7 @@ extern struct tlock *TxLock; /* transaction lock table */ | |||
122 | #define tlckLOG 0x0800 | 122 | #define tlckLOG 0x0800 |
123 | /* updateMap state */ | 123 | /* updateMap state */ |
124 | #define tlckUPDATEMAP 0x0080 | 124 | #define tlckUPDATEMAP 0x0080 |
125 | #define tlckDIRECTORY 0x0040 | ||
125 | /* freeLock state */ | 126 | /* freeLock state */ |
126 | #define tlckFREELOCK 0x0008 | 127 | #define tlckFREELOCK 0x0008 |
127 | #define tlckWRITEPAGE 0x0004 | 128 | #define tlckWRITEPAGE 0x0004 |
diff --git a/fs/locks.c b/fs/locks.c index c2c09b4798d6..f7daa5f48949 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -124,6 +124,7 @@ | |||
124 | #include <linux/smp_lock.h> | 124 | #include <linux/smp_lock.h> |
125 | #include <linux/syscalls.h> | 125 | #include <linux/syscalls.h> |
126 | #include <linux/time.h> | 126 | #include <linux/time.h> |
127 | #include <linux/rcupdate.h> | ||
127 | 128 | ||
128 | #include <asm/semaphore.h> | 129 | #include <asm/semaphore.h> |
129 | #include <asm/uaccess.h> | 130 | #include <asm/uaccess.h> |
@@ -2205,6 +2206,7 @@ void steal_locks(fl_owner_t from) | |||
2205 | 2206 | ||
2206 | lock_kernel(); | 2207 | lock_kernel(); |
2207 | j = 0; | 2208 | j = 0; |
2209 | rcu_read_lock(); | ||
2208 | fdt = files_fdtable(files); | 2210 | fdt = files_fdtable(files); |
2209 | for (;;) { | 2211 | for (;;) { |
2210 | unsigned long set; | 2212 | unsigned long set; |
@@ -2222,6 +2224,7 @@ void steal_locks(fl_owner_t from) | |||
2222 | set >>= 1; | 2224 | set >>= 1; |
2223 | } | 2225 | } |
2224 | } | 2226 | } |
2227 | rcu_read_unlock(); | ||
2225 | unlock_kernel(); | 2228 | unlock_kernel(); |
2226 | } | 2229 | } |
2227 | EXPORT_SYMBOL(steal_locks); | 2230 | EXPORT_SYMBOL(steal_locks); |
diff --git a/fs/mpage.c b/fs/mpage.c index bb9aebe93862..c5adcdddf3cc 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -102,7 +102,7 @@ static struct bio *mpage_bio_submit(int rw, struct bio *bio) | |||
102 | static struct bio * | 102 | static struct bio * |
103 | mpage_alloc(struct block_device *bdev, | 103 | mpage_alloc(struct block_device *bdev, |
104 | sector_t first_sector, int nr_vecs, | 104 | sector_t first_sector, int nr_vecs, |
105 | unsigned int __nocast gfp_flags) | 105 | gfp_t gfp_flags) |
106 | { | 106 | { |
107 | struct bio *bio; | 107 | struct bio *bio; |
108 | 108 | ||
diff --git a/fs/namei.c b/fs/namei.c index 043d587216b5..aa62dbda93ac 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1551,19 +1551,19 @@ do_link: | |||
1551 | if (nd->last_type != LAST_NORM) | 1551 | if (nd->last_type != LAST_NORM) |
1552 | goto exit; | 1552 | goto exit; |
1553 | if (nd->last.name[nd->last.len]) { | 1553 | if (nd->last.name[nd->last.len]) { |
1554 | putname(nd->last.name); | 1554 | __putname(nd->last.name); |
1555 | goto exit; | 1555 | goto exit; |
1556 | } | 1556 | } |
1557 | error = -ELOOP; | 1557 | error = -ELOOP; |
1558 | if (count++==32) { | 1558 | if (count++==32) { |
1559 | putname(nd->last.name); | 1559 | __putname(nd->last.name); |
1560 | goto exit; | 1560 | goto exit; |
1561 | } | 1561 | } |
1562 | dir = nd->dentry; | 1562 | dir = nd->dentry; |
1563 | down(&dir->d_inode->i_sem); | 1563 | down(&dir->d_inode->i_sem); |
1564 | path.dentry = __lookup_hash(&nd->last, nd->dentry, nd); | 1564 | path.dentry = __lookup_hash(&nd->last, nd->dentry, nd); |
1565 | path.mnt = nd->mnt; | 1565 | path.mnt = nd->mnt; |
1566 | putname(nd->last.name); | 1566 | __putname(nd->last.name); |
1567 | goto do_last; | 1567 | goto do_last; |
1568 | } | 1568 | } |
1569 | 1569 | ||
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index d7f7eb669d03..4a36839f0bbd 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -85,6 +85,10 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct | |||
85 | struct nfs_delegation *delegation; | 85 | struct nfs_delegation *delegation; |
86 | int status = 0; | 86 | int status = 0; |
87 | 87 | ||
88 | /* Ensure we first revalidate the attributes and page cache! */ | ||
89 | if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR))) | ||
90 | __nfs_revalidate_inode(NFS_SERVER(inode), inode); | ||
91 | |||
88 | delegation = nfs_alloc_delegation(); | 92 | delegation = nfs_alloc_delegation(); |
89 | if (delegation == NULL) | 93 | if (delegation == NULL) |
90 | return -ENOMEM; | 94 | return -ENOMEM; |
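The new guard above simply forces a revalidation before the delegation is recorded; in isolation (a fragment, types as in the hunk):

	/* if the cached attributes or the page cache are already flagged as
	 * suspect, refresh them first so the delegation does not end up
	 * pinning stale data */
	if (nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE | NFS_INO_INVALID_ATTR))
		__nfs_revalidate_inode(NFS_SERVER(inode), inode);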
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f6b9eda925c5..6bdcfa95de94 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -137,7 +137,8 @@ static int nfs_revalidate_file(struct inode *inode, struct file *filp) | |||
137 | struct nfs_inode *nfsi = NFS_I(inode); | 137 | struct nfs_inode *nfsi = NFS_I(inode); |
138 | int retval = 0; | 138 | int retval = 0; |
139 | 139 | ||
140 | if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode)) | 140 | if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)) |
141 | || nfs_attribute_timeout(inode)) | ||
141 | retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode); | 142 | retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode); |
142 | nfs_revalidate_mapping(inode, filp->f_mapping); | 143 | nfs_revalidate_mapping(inode, filp->f_mapping); |
143 | return 0; | 144 | return 0; |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 6922469d6fc5..d4eadeea128e 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -877,12 +877,10 @@ static int nfs_wait_on_inode(struct inode *inode) | |||
877 | sigset_t oldmask; | 877 | sigset_t oldmask; |
878 | int error; | 878 | int error; |
879 | 879 | ||
880 | atomic_inc(&inode->i_count); | ||
881 | rpc_clnt_sigmask(clnt, &oldmask); | 880 | rpc_clnt_sigmask(clnt, &oldmask); |
882 | error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING, | 881 | error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING, |
883 | nfs_wait_schedule, TASK_INTERRUPTIBLE); | 882 | nfs_wait_schedule, TASK_INTERRUPTIBLE); |
884 | rpc_clnt_sigunmask(clnt, &oldmask); | 883 | rpc_clnt_sigunmask(clnt, &oldmask); |
885 | iput(inode); | ||
886 | 884 | ||
887 | return error; | 885 | return error; |
888 | } | 886 | } |
@@ -1226,10 +1224,6 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1226 | loff_t cur_size, new_isize; | 1224 | loff_t cur_size, new_isize; |
1227 | int data_unstable; | 1225 | int data_unstable; |
1228 | 1226 | ||
1229 | /* Do we hold a delegation? */ | ||
1230 | if (nfs_have_delegation(inode, FMODE_READ)) | ||
1231 | return 0; | ||
1232 | |||
1233 | spin_lock(&inode->i_lock); | 1227 | spin_lock(&inode->i_lock); |
1234 | 1228 | ||
1235 | /* Are we in the process of updating data on the server? */ | 1229 | /* Are we in the process of updating data on the server? */ |
@@ -1350,7 +1344,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign | |||
1350 | nfsi->read_cache_jiffies = fattr->timestamp; | 1344 | nfsi->read_cache_jiffies = fattr->timestamp; |
1351 | 1345 | ||
1352 | /* Are we racing with known updates of the metadata on the server? */ | 1346 | /* Are we racing with known updates of the metadata on the server? */ |
1353 | data_unstable = ! nfs_verify_change_attribute(inode, verifier); | 1347 | data_unstable = ! (nfs_verify_change_attribute(inode, verifier) || |
1348 | (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)); | ||
1354 | 1349 | ||
1355 | /* Check if our cached file size is stale */ | 1350 | /* Check if our cached file size is stale */ |
1356 | new_isize = nfs_size_to_loff_t(fattr->size); | 1351 | new_isize = nfs_size_to_loff_t(fattr->size); |
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 6ceb1d471f20..9758ebd49905 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -184,14 +184,13 @@ static void nfs_readpage_release(struct nfs_page *req) | |||
184 | { | 184 | { |
185 | unlock_page(req->wb_page); | 185 | unlock_page(req->wb_page); |
186 | 186 | ||
187 | nfs_clear_request(req); | ||
188 | nfs_release_request(req); | ||
189 | |||
190 | dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", | 187 | dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", |
191 | req->wb_context->dentry->d_inode->i_sb->s_id, | 188 | req->wb_context->dentry->d_inode->i_sb->s_id, |
192 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 189 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), |
193 | req->wb_bytes, | 190 | req->wb_bytes, |
194 | (long long)req_offset(req)); | 191 | (long long)req_offset(req)); |
192 | nfs_clear_request(req); | ||
193 | nfs_release_request(req); | ||
195 | } | 194 | } |
196 | 195 | ||
197 | /* | 196 | /* |
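Annotation: the reordering in nfs_readpage_release() above exists because the dprintk() still dereferences the request (req->wb_context, req->wb_bytes), so the request may only be cleared and released once the trace message has been emitted. A minimal standalone sketch of that use-before-release ordering (hypothetical names, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct request { int bytes; };

static void release_request(struct request *req)
{
        free(req);              /* after this, req must not be touched */
}

static void request_done(struct request *req)
{
        printf("read done (%d bytes)\n", req->bytes);   /* use it first... */
        release_request(req);                           /* ...then drop it */
}

int main(void)
{
        struct request *req = malloc(sizeof(*req));

        if (!req)
                return 1;
        req->bytes = 4096;
        request_done(req);
        return 0;
}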
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index 251e5a1bb1c4..0c2be8c0307d 100644 --- a/fs/nfs_common/nfsacl.c +++ b/fs/nfs_common/nfsacl.c | |||
@@ -48,43 +48,26 @@ xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem) | |||
48 | (struct nfsacl_encode_desc *) desc; | 48 | (struct nfsacl_encode_desc *) desc; |
49 | u32 *p = (u32 *) elem; | 49 | u32 *p = (u32 *) elem; |
50 | 50 | ||
51 | if (nfsacl_desc->count < nfsacl_desc->acl->a_count) { | 51 | struct posix_acl_entry *entry = |
52 | struct posix_acl_entry *entry = | 52 | &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; |
53 | &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; | ||
54 | 53 | ||
55 | *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); | 54 | *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); |
56 | switch(entry->e_tag) { | 55 | switch(entry->e_tag) { |
57 | case ACL_USER_OBJ: | 56 | case ACL_USER_OBJ: |
58 | *p++ = htonl(nfsacl_desc->uid); | 57 | *p++ = htonl(nfsacl_desc->uid); |
59 | break; | 58 | break; |
60 | case ACL_GROUP_OBJ: | 59 | case ACL_GROUP_OBJ: |
61 | *p++ = htonl(nfsacl_desc->gid); | 60 | *p++ = htonl(nfsacl_desc->gid); |
62 | break; | 61 | break; |
63 | case ACL_USER: | 62 | case ACL_USER: |
64 | case ACL_GROUP: | 63 | case ACL_GROUP: |
65 | *p++ = htonl(entry->e_id); | 64 | *p++ = htonl(entry->e_id); |
66 | break; | 65 | break; |
67 | default: /* Solaris depends on that! */ | 66 | default: /* Solaris depends on that! */ |
68 | *p++ = 0; | 67 | *p++ = 0; |
69 | break; | 68 | break; |
70 | } | ||
71 | *p++ = htonl(entry->e_perm & S_IRWXO); | ||
72 | } else { | ||
73 | const struct posix_acl_entry *pa, *pe; | ||
74 | int group_obj_perm = ACL_READ|ACL_WRITE|ACL_EXECUTE; | ||
75 | |||
76 | FOREACH_ACL_ENTRY(pa, nfsacl_desc->acl, pe) { | ||
77 | if (pa->e_tag == ACL_GROUP_OBJ) { | ||
78 | group_obj_perm = pa->e_perm & S_IRWXO; | ||
79 | break; | ||
80 | } | ||
81 | } | ||
82 | /* fake up ACL_MASK entry */ | ||
83 | *p++ = htonl(ACL_MASK | nfsacl_desc->typeflag); | ||
84 | *p++ = htonl(0); | ||
85 | *p++ = htonl(group_obj_perm); | ||
86 | } | 69 | } |
87 | 70 | *p++ = htonl(entry->e_perm & S_IRWXO); | |
88 | return 0; | 71 | return 0; |
89 | } | 72 | } |
90 | 73 | ||
@@ -105,11 +88,28 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, | |||
105 | .gid = inode->i_gid, | 88 | .gid = inode->i_gid, |
106 | }; | 89 | }; |
107 | int err; | 90 | int err; |
91 | struct posix_acl *acl2 = NULL; | ||
108 | 92 | ||
109 | if (entries > NFS_ACL_MAX_ENTRIES || | 93 | if (entries > NFS_ACL_MAX_ENTRIES || |
110 | xdr_encode_word(buf, base, entries)) | 94 | xdr_encode_word(buf, base, entries)) |
111 | return -EINVAL; | 95 | return -EINVAL; |
96 | if (encode_entries && acl && acl->a_count == 3) { | ||
97 | /* Fake up an ACL_MASK entry. */ | ||
98 | acl2 = posix_acl_alloc(4, GFP_KERNEL); | ||
99 | if (!acl2) | ||
100 | return -ENOMEM; | ||
101 | /* Insert entries in canonical order: other orders seem | ||
102 | to confuse Solaris VxFS. */ | ||
103 | acl2->a_entries[0] = acl->a_entries[0]; /* ACL_USER_OBJ */ | ||
104 | acl2->a_entries[1] = acl->a_entries[1]; /* ACL_GROUP_OBJ */ | ||
105 | acl2->a_entries[2] = acl->a_entries[1]; /* ACL_MASK */ | ||
106 | acl2->a_entries[2].e_tag = ACL_MASK; | ||
107 | acl2->a_entries[3] = acl->a_entries[2]; /* ACL_OTHER */ | ||
108 | nfsacl_desc.acl = acl2; | ||
109 | } | ||
112 | err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc); | 110 | err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc); |
111 | if (acl2) | ||
112 | posix_acl_release(acl2); | ||
113 | if (!err) | 113 | if (!err) |
114 | err = 8 + nfsacl_desc.desc.elem_size * | 114 | err = 8 + nfsacl_desc.desc.elem_size * |
115 | nfsacl_desc.desc.array_len; | 115 | nfsacl_desc.desc.array_len; |
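Annotation: the nfsacl_encode() rework above moves the fake ACL_MASK entry out of the per-element encode callback and into a padded copy of the ACL built before encoding, so the entries go out in canonical order. The standalone sketch below only illustrates the resulting entry order for a three-entry ACL; the tag values mirror the Linux ACL headers, and everything else is made up for the example:

#include <stdio.h>

enum { USER_OBJ = 0x01, GROUP_OBJ = 0x04, MASK = 0x10, OTHER = 0x20 };

struct ace { int tag; int perm; };

int main(void)
{
        /* a minimal 3-entry ACL: rw- owner, r-- group, r-- other */
        struct ace acl[3] = { { USER_OBJ, 6 }, { GROUP_OBJ, 4 }, { OTHER, 4 } };

        /* padded to four entries in canonical order, with ACL_MASK
         * copying the ACL_GROUP_OBJ permissions */
        struct ace wire[4] = {
                acl[0],                 /* ACL_USER_OBJ  */
                acl[1],                 /* ACL_GROUP_OBJ */
                { MASK, acl[1].perm },  /* ACL_MASK      */
                acl[2],                 /* ACL_OTHER     */
        };

        for (int i = 0; i < 4; i++)
                printf("tag=0x%02x perm=%o\n", wire[i].tag, wire[i].perm);
        return 0;
}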
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog index 49eafbdb15c1..de58579a1d0e 100644 --- a/fs/ntfs/ChangeLog +++ b/fs/ntfs/ChangeLog | |||
@@ -29,7 +29,8 @@ ToDo/Notes: | |||
29 | The Windows boot will run chkdsk and then reboot. The user can then | 29 | The Windows boot will run chkdsk and then reboot. The user can then |
30 | immediately boot into Linux rather than having to do a full Windows | 30 | immediately boot into Linux rather than having to do a full Windows |
31 | boot first before rebooting into Linux and we will recognize such a | 31 | boot first before rebooting into Linux and we will recognize such a |
32 | journal and empty it as it is clean by definition. | 32 | journal and empty it as it is clean by definition. Note, this only |
33 | works if chkdsk left the journal in an obviously clean state. | ||
33 | - Support journals ($LogFile) with only one restart page as well as | 34 | - Support journals ($LogFile) with only one restart page as well as |
34 | journals with two different restart pages. We sanity check both and | 35 | journals with two different restart pages. We sanity check both and |
35 | either use the only sane one or the more recent one of the two in the | 36 | either use the only sane one or the more recent one of the two in the |
@@ -92,6 +93,18 @@ ToDo/Notes: | |||
92 | an octal number to conform to how chmod(1) works, too. Thanks to | 93 | an octal number to conform to how chmod(1) works, too. Thanks to |
93 | Giuseppe Bilotta and Horst von Brand for pointing out the errors of | 94 | Giuseppe Bilotta and Horst von Brand for pointing out the errors of |
94 | my ways. | 95 | my ways. |
96 | - Fix various bugs in the runlist merging code. (Based on libntfs | ||
97 | changes by Richard Russon.) | ||
98 | - Fix sparse warnings that have crept in over time. | ||
99 | - Change ntfs_cluster_free() to require a write locked runlist on entry | ||
100 | since we otherwise get into a lock reversal deadlock if a read locked | ||
101 | runlist is passed in. In the process also change it to take an ntfs | ||
102 | inode instead of a vfs inode as parameter. | ||
103 | - Fix the definition of the CHKD ntfs record magic. It had an off by | ||
104 | two error causing it to be CHKB instead of CHKD. | ||
105 | - Fix a stupid bug in __ntfs_bitmap_set_bits_in_run() which caused the | ||
106 | count to become negative and hence we had a wild memset() scribbling | ||
107 | all over the system's ram. | ||
95 | 108 | ||
96 | 2.1.23 - Implement extension of resident files and make writing safe as well as | 109 | 2.1.23 - Implement extension of resident files and make writing safe as well as |
97 | many bug fixes, cleanups, and enhancements... | 110 | many bug fixes, cleanups, and enhancements... |
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index b6cc8cf24626..5e80c07c6a4d 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c | |||
@@ -59,39 +59,49 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
59 | unsigned long flags; | 59 | unsigned long flags; |
60 | struct buffer_head *first, *tmp; | 60 | struct buffer_head *first, *tmp; |
61 | struct page *page; | 61 | struct page *page; |
62 | struct inode *vi; | ||
62 | ntfs_inode *ni; | 63 | ntfs_inode *ni; |
63 | int page_uptodate = 1; | 64 | int page_uptodate = 1; |
64 | 65 | ||
65 | page = bh->b_page; | 66 | page = bh->b_page; |
66 | ni = NTFS_I(page->mapping->host); | 67 | vi = page->mapping->host; |
68 | ni = NTFS_I(vi); | ||
67 | 69 | ||
68 | if (likely(uptodate)) { | 70 | if (likely(uptodate)) { |
69 | s64 file_ofs, initialized_size; | 71 | loff_t i_size; |
72 | s64 file_ofs, init_size; | ||
70 | 73 | ||
71 | set_buffer_uptodate(bh); | 74 | set_buffer_uptodate(bh); |
72 | 75 | ||
73 | file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + | 76 | file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + |
74 | bh_offset(bh); | 77 | bh_offset(bh); |
75 | read_lock_irqsave(&ni->size_lock, flags); | 78 | read_lock_irqsave(&ni->size_lock, flags); |
76 | initialized_size = ni->initialized_size; | 79 | init_size = ni->initialized_size; |
80 | i_size = i_size_read(vi); | ||
77 | read_unlock_irqrestore(&ni->size_lock, flags); | 81 | read_unlock_irqrestore(&ni->size_lock, flags); |
82 | if (unlikely(init_size > i_size)) { | ||
83 | /* Race with shrinking truncate. */ | ||
84 | init_size = i_size; | ||
85 | } | ||
78 | /* Check for the current buffer head overflowing. */ | 86 | /* Check for the current buffer head overflowing. */ |
79 | if (file_ofs + bh->b_size > initialized_size) { | 87 | if (unlikely(file_ofs + bh->b_size > init_size)) { |
80 | char *addr; | 88 | u8 *kaddr; |
81 | int ofs = 0; | 89 | int ofs; |
82 | 90 | ||
83 | if (file_ofs < initialized_size) | 91 | ofs = 0; |
84 | ofs = initialized_size - file_ofs; | 92 | if (file_ofs < init_size) |
85 | addr = kmap_atomic(page, KM_BIO_SRC_IRQ); | 93 | ofs = init_size - file_ofs; |
86 | memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); | 94 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); |
95 | memset(kaddr + bh_offset(bh) + ofs, 0, | ||
96 | bh->b_size - ofs); | ||
97 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); | ||
87 | flush_dcache_page(page); | 98 | flush_dcache_page(page); |
88 | kunmap_atomic(addr, KM_BIO_SRC_IRQ); | ||
89 | } | 99 | } |
90 | } else { | 100 | } else { |
91 | clear_buffer_uptodate(bh); | 101 | clear_buffer_uptodate(bh); |
92 | SetPageError(page); | 102 | SetPageError(page); |
93 | ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.", | 103 | ntfs_error(ni->vol->sb, "Buffer I/O error, logical block " |
94 | (unsigned long long)bh->b_blocknr); | 104 | "0x%llx.", (unsigned long long)bh->b_blocknr); |
95 | } | 105 | } |
96 | first = page_buffers(page); | 106 | first = page_buffers(page); |
97 | local_irq_save(flags); | 107 | local_irq_save(flags); |
@@ -124,7 +134,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
124 | if (likely(page_uptodate && !PageError(page))) | 134 | if (likely(page_uptodate && !PageError(page))) |
125 | SetPageUptodate(page); | 135 | SetPageUptodate(page); |
126 | } else { | 136 | } else { |
127 | char *addr; | 137 | u8 *kaddr; |
128 | unsigned int i, recs; | 138 | unsigned int i, recs; |
129 | u32 rec_size; | 139 | u32 rec_size; |
130 | 140 | ||
@@ -132,12 +142,12 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |||
132 | recs = PAGE_CACHE_SIZE / rec_size; | 142 | recs = PAGE_CACHE_SIZE / rec_size; |
133 | /* Should have been verified before we got here... */ | 143 | /* Should have been verified before we got here... */ |
134 | BUG_ON(!recs); | 144 | BUG_ON(!recs); |
135 | addr = kmap_atomic(page, KM_BIO_SRC_IRQ); | 145 | kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); |
136 | for (i = 0; i < recs; i++) | 146 | for (i = 0; i < recs; i++) |
137 | post_read_mst_fixup((NTFS_RECORD*)(addr + | 147 | post_read_mst_fixup((NTFS_RECORD*)(kaddr + |
138 | i * rec_size), rec_size); | 148 | i * rec_size), rec_size); |
149 | kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); | ||
139 | flush_dcache_page(page); | 150 | flush_dcache_page(page); |
140 | kunmap_atomic(addr, KM_BIO_SRC_IRQ); | ||
141 | if (likely(page_uptodate && !PageError(page))) | 151 | if (likely(page_uptodate && !PageError(page))) |
142 | SetPageUptodate(page); | 152 | SetPageUptodate(page); |
143 | } | 153 | } |
@@ -168,8 +178,11 @@ still_busy: | |||
168 | */ | 178 | */ |
169 | static int ntfs_read_block(struct page *page) | 179 | static int ntfs_read_block(struct page *page) |
170 | { | 180 | { |
181 | loff_t i_size; | ||
171 | VCN vcn; | 182 | VCN vcn; |
172 | LCN lcn; | 183 | LCN lcn; |
184 | s64 init_size; | ||
185 | struct inode *vi; | ||
173 | ntfs_inode *ni; | 186 | ntfs_inode *ni; |
174 | ntfs_volume *vol; | 187 | ntfs_volume *vol; |
175 | runlist_element *rl; | 188 | runlist_element *rl; |
@@ -180,7 +193,8 @@ static int ntfs_read_block(struct page *page) | |||
180 | int i, nr; | 193 | int i, nr; |
181 | unsigned char blocksize_bits; | 194 | unsigned char blocksize_bits; |
182 | 195 | ||
183 | ni = NTFS_I(page->mapping->host); | 196 | vi = page->mapping->host; |
197 | ni = NTFS_I(vi); | ||
184 | vol = ni->vol; | 198 | vol = ni->vol; |
185 | 199 | ||
186 | /* $MFT/$DATA must have its complete runlist in memory at all times. */ | 200 | /* $MFT/$DATA must have its complete runlist in memory at all times. */ |
@@ -199,11 +213,28 @@ static int ntfs_read_block(struct page *page) | |||
199 | bh = head = page_buffers(page); | 213 | bh = head = page_buffers(page); |
200 | BUG_ON(!bh); | 214 | BUG_ON(!bh); |
201 | 215 | ||
216 | /* | ||
217 | * We may be racing with truncate. To avoid some of the problems we | ||
218 | * now take a snapshot of the various sizes and use those for the whole | ||
219 | * of the function. In case of an extending truncate it just means we | ||
220 | * may leave some buffers unmapped which are now allocated. This is | ||
221 | * not a problem since these buffers will just get mapped when a write | ||
222 | * occurs. In case of a shrinking truncate, we will detect this later | ||
223 | * on due to the runlist being incomplete and if the page is being | ||
224 | * fully truncated, truncate will throw it away as soon as we unlock | ||
225 | * it so no need to worry what we do with it. | ||
226 | */ | ||
202 | iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); | 227 | iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); |
203 | read_lock_irqsave(&ni->size_lock, flags); | 228 | read_lock_irqsave(&ni->size_lock, flags); |
204 | lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; | 229 | lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; |
205 | zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits; | 230 | init_size = ni->initialized_size; |
231 | i_size = i_size_read(vi); | ||
206 | read_unlock_irqrestore(&ni->size_lock, flags); | 232 | read_unlock_irqrestore(&ni->size_lock, flags); |
233 | if (unlikely(init_size > i_size)) { | ||
234 | /* Race with shrinking truncate. */ | ||
235 | init_size = i_size; | ||
236 | } | ||
237 | zblock = (init_size + blocksize - 1) >> blocksize_bits; | ||
207 | 238 | ||
208 | /* Loop through all the buffers in the page. */ | 239 | /* Loop through all the buffers in the page. */ |
209 | rl = NULL; | 240 | rl = NULL; |
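Annotation: the truncate-race comment above is easiest to see with numbers. If the snapshotted initialized size is larger than the post-truncate i_size, the zeroing offset computed for a buffer head can exceed the buffer size and the zeroing length goes negative. A standalone sketch of the clamp with made-up values (no kernel types):

#include <stdio.h>

int main(void)
{
        long long i_size    = 4096;     /* after a shrinking truncate    */
        long long init_size = 16384;    /* stale snapshot from before it */
        long long file_ofs  = 8192, bh_size = 512, ofs = 0;

        /* the clamp added by the patch; without it ofs would be 8192 and
         * bh_size - ofs would become enormous once converted to size_t */
        if (init_size > i_size)
                init_size = i_size;

        if (file_ofs < init_size)
                ofs = init_size - file_ofs;
        printf("zero %lld bytes at offset %lld in the buffer\n",
               bh_size - ofs, ofs);
        return 0;
}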
@@ -366,6 +397,8 @@ handle_zblock: | |||
366 | */ | 397 | */ |
367 | static int ntfs_readpage(struct file *file, struct page *page) | 398 | static int ntfs_readpage(struct file *file, struct page *page) |
368 | { | 399 | { |
400 | loff_t i_size; | ||
401 | struct inode *vi; | ||
369 | ntfs_inode *ni, *base_ni; | 402 | ntfs_inode *ni, *base_ni; |
370 | u8 *kaddr; | 403 | u8 *kaddr; |
371 | ntfs_attr_search_ctx *ctx; | 404 | ntfs_attr_search_ctx *ctx; |
@@ -384,14 +417,17 @@ retry_readpage: | |||
384 | unlock_page(page); | 417 | unlock_page(page); |
385 | return 0; | 418 | return 0; |
386 | } | 419 | } |
387 | ni = NTFS_I(page->mapping->host); | 420 | vi = page->mapping->host; |
421 | ni = NTFS_I(vi); | ||
388 | /* | 422 | /* |
389 | * Only $DATA attributes can be encrypted and only unnamed $DATA | 423 | * Only $DATA attributes can be encrypted and only unnamed $DATA |
390 | * attributes can be compressed. Index root can have the flags set but | 424 | * attributes can be compressed. Index root can have the flags set but |
391 | * this means to create compressed/encrypted files, not that the | 425 | * this means to create compressed/encrypted files, not that the |
392 | * attribute is compressed/encrypted. | 426 | * attribute is compressed/encrypted. Note we need to check for |
427 | * AT_INDEX_ALLOCATION since this is the type of both directory and | ||
428 | * index inodes. | ||
393 | */ | 429 | */ |
394 | if (ni->type != AT_INDEX_ROOT) { | 430 | if (ni->type != AT_INDEX_ALLOCATION) { |
395 | /* If attribute is encrypted, deny access, just like NT4. */ | 431 | /* If attribute is encrypted, deny access, just like NT4. */ |
396 | if (NInoEncrypted(ni)) { | 432 | if (NInoEncrypted(ni)) { |
397 | BUG_ON(ni->type != AT_DATA); | 433 | BUG_ON(ni->type != AT_DATA); |
@@ -456,7 +492,12 @@ retry_readpage: | |||
456 | read_lock_irqsave(&ni->size_lock, flags); | 492 | read_lock_irqsave(&ni->size_lock, flags); |
457 | if (unlikely(attr_len > ni->initialized_size)) | 493 | if (unlikely(attr_len > ni->initialized_size)) |
458 | attr_len = ni->initialized_size; | 494 | attr_len = ni->initialized_size; |
495 | i_size = i_size_read(vi); | ||
459 | read_unlock_irqrestore(&ni->size_lock, flags); | 496 | read_unlock_irqrestore(&ni->size_lock, flags); |
497 | if (unlikely(attr_len > i_size)) { | ||
498 | /* Race with shrinking truncate. */ | ||
499 | attr_len = i_size; | ||
500 | } | ||
460 | kaddr = kmap_atomic(page, KM_USER0); | 501 | kaddr = kmap_atomic(page, KM_USER0); |
461 | /* Copy the data to the page. */ | 502 | /* Copy the data to the page. */ |
462 | memcpy(kaddr, (u8*)ctx->attr + | 503 | memcpy(kaddr, (u8*)ctx->attr + |
@@ -1341,9 +1382,11 @@ retry_writepage: | |||
1341 | * Only $DATA attributes can be encrypted and only unnamed $DATA | 1382 | * Only $DATA attributes can be encrypted and only unnamed $DATA |
1342 | * attributes can be compressed. Index root can have the flags set but | 1383 | * attributes can be compressed. Index root can have the flags set but |
1343 | * this means to create compressed/encrypted files, not that the | 1384 | * this means to create compressed/encrypted files, not that the |
1344 | * attribute is compressed/encrypted. | 1385 | * attribute is compressed/encrypted. Note we need to check for |
1386 | * AT_INDEX_ALLOCATION since this is the type of both directory and | ||
1387 | * index inodes. | ||
1345 | */ | 1388 | */ |
1346 | if (ni->type != AT_INDEX_ROOT) { | 1389 | if (ni->type != AT_INDEX_ALLOCATION) { |
1347 | /* If file is encrypted, deny access, just like NT4. */ | 1390 | /* If file is encrypted, deny access, just like NT4. */ |
1348 | if (NInoEncrypted(ni)) { | 1391 | if (NInoEncrypted(ni)) { |
1349 | unlock_page(page); | 1392 | unlock_page(page); |
@@ -1379,8 +1422,8 @@ retry_writepage: | |||
1379 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; | 1422 | unsigned int ofs = i_size & ~PAGE_CACHE_MASK; |
1380 | kaddr = kmap_atomic(page, KM_USER0); | 1423 | kaddr = kmap_atomic(page, KM_USER0); |
1381 | memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); | 1424 | memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); |
1382 | flush_dcache_page(page); | ||
1383 | kunmap_atomic(kaddr, KM_USER0); | 1425 | kunmap_atomic(kaddr, KM_USER0); |
1426 | flush_dcache_page(page); | ||
1384 | } | 1427 | } |
1385 | /* Handle mst protected attributes. */ | 1428 | /* Handle mst protected attributes. */ |
1386 | if (NInoMstProtected(ni)) | 1429 | if (NInoMstProtected(ni)) |
@@ -1443,34 +1486,33 @@ retry_writepage: | |||
1443 | BUG_ON(PageWriteback(page)); | 1486 | BUG_ON(PageWriteback(page)); |
1444 | set_page_writeback(page); | 1487 | set_page_writeback(page); |
1445 | unlock_page(page); | 1488 | unlock_page(page); |
1446 | /* | ||
1447 | * Here, we do not need to zero the out of bounds area everytime | ||
1448 | * because the below memcpy() already takes care of the | ||
1449 | * mmap-at-end-of-file requirements. If the file is converted to a | ||
1450 | * non-resident one, then the code path use is switched to the | ||
1451 | * non-resident one where the zeroing happens on each ntfs_writepage() | ||
1452 | * invocation. | ||
1453 | */ | ||
1454 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); | 1489 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); |
1455 | i_size = i_size_read(vi); | 1490 | i_size = i_size_read(vi); |
1456 | if (unlikely(attr_len > i_size)) { | 1491 | if (unlikely(attr_len > i_size)) { |
1492 | /* Race with shrinking truncate or a failed truncate. */ | ||
1457 | attr_len = i_size; | 1493 | attr_len = i_size; |
1458 | ctx->attr->data.resident.value_length = cpu_to_le32(attr_len); | 1494 | /* |
1495 | * If the truncate failed, fix it up now. If a concurrent | ||
1496 | * truncate, we do its job, so it does not have to do anything. | ||
1497 | */ | ||
1498 | err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, | ||
1499 | attr_len); | ||
1500 | /* Shrinking cannot fail. */ | ||
1501 | BUG_ON(err); | ||
1459 | } | 1502 | } |
1460 | kaddr = kmap_atomic(page, KM_USER0); | 1503 | kaddr = kmap_atomic(page, KM_USER0); |
1461 | /* Copy the data from the page to the mft record. */ | 1504 | /* Copy the data from the page to the mft record. */ |
1462 | memcpy((u8*)ctx->attr + | 1505 | memcpy((u8*)ctx->attr + |
1463 | le16_to_cpu(ctx->attr->data.resident.value_offset), | 1506 | le16_to_cpu(ctx->attr->data.resident.value_offset), |
1464 | kaddr, attr_len); | 1507 | kaddr, attr_len); |
1465 | flush_dcache_mft_record_page(ctx->ntfs_ino); | ||
1466 | /* Zero out of bounds area in the page cache page. */ | 1508 | /* Zero out of bounds area in the page cache page. */ |
1467 | memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); | 1509 | memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); |
1468 | flush_dcache_page(page); | ||
1469 | kunmap_atomic(kaddr, KM_USER0); | 1510 | kunmap_atomic(kaddr, KM_USER0); |
1470 | 1511 | flush_dcache_mft_record_page(ctx->ntfs_ino); | |
1512 | flush_dcache_page(page); | ||
1513 | /* We are done with the page. */ | ||
1471 | end_page_writeback(page); | 1514 | end_page_writeback(page); |
1472 | 1515 | /* Finally, mark the mft record dirty, so it gets written back. */ | |
1473 | /* Mark the mft record dirty, so it gets written back. */ | ||
1474 | mark_mft_record_dirty(ctx->ntfs_ino); | 1516 | mark_mft_record_dirty(ctx->ntfs_ino); |
1475 | ntfs_attr_put_search_ctx(ctx); | 1517 | ntfs_attr_put_search_ctx(ctx); |
1476 | unmap_mft_record(base_ni); | 1518 | unmap_mft_record(base_ni); |
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c index 12cf2e30c7dd..7a190cdc60e2 100644 --- a/fs/ntfs/bitmap.c +++ b/fs/ntfs/bitmap.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project. | 2 | * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2004 Anton Altaparmakov | 4 | * Copyright (c) 2004-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
@@ -90,7 +90,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, | |||
90 | /* If the first byte is partial, modify the appropriate bits in it. */ | 90 | /* If the first byte is partial, modify the appropriate bits in it. */ |
91 | if (bit) { | 91 | if (bit) { |
92 | u8 *byte = kaddr + pos; | 92 | u8 *byte = kaddr + pos; |
93 | while ((bit & 7) && cnt--) { | 93 | while ((bit & 7) && cnt) { |
94 | cnt--; | ||
94 | if (value) | 95 | if (value) |
95 | *byte |= 1 << bit++; | 96 | *byte |= 1 << bit++; |
96 | else | 97 | else |
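Annotation: the bitmap.c change above is the "stupid bug" mentioned in the ChangeLog. With the post-decrement in the loop condition, cnt is decremented even on the iteration that terminates the loop, so a count of zero underflows to -1 and later poisons the byte-count arithmetic fed to memset(). A standalone demonstration of the two forms:

#include <stdio.h>

int main(void)
{
        long long cnt = 0;
        int bit = 1;                    /* partial first byte, (bit & 7) true */

        while ((bit & 7) && cnt--)      /* old form: decrements on loop exit */
                bit++;
        printf("old form leaves cnt = %lld\n", cnt);    /* prints -1 */

        cnt = 0;
        bit = 1;
        while ((bit & 7) && cnt) {      /* fixed form: test, then decrement */
                cnt--;
                bit++;
        }
        printf("new form leaves cnt = %lld\n", cnt);    /* prints 0 */
        return 0;
}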
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index dc4bbe3acf5c..7ec045131808 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c | |||
@@ -1166,6 +1166,8 @@ err_out: | |||
1166 | * | 1166 | * |
1167 | * Return 0 on success and -errno on error. In the error case, the inode will | 1167 | * Return 0 on success and -errno on error. In the error case, the inode will |
1168 | * have had make_bad_inode() executed on it. | 1168 | * have had make_bad_inode() executed on it. |
1169 | * | ||
1170 | * Note this cannot be called for AT_INDEX_ALLOCATION. | ||
1169 | */ | 1171 | */ |
1170 | static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | 1172 | static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) |
1171 | { | 1173 | { |
@@ -1242,8 +1244,8 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | |||
1242 | } | 1244 | } |
1243 | } | 1245 | } |
1244 | /* | 1246 | /* |
1245 | * The encryption flag set in an index root just means to | 1247 | * The compressed/sparse flag set in an index root just means |
1246 | * compress all files. | 1248 | * to compress all files. |
1247 | */ | 1249 | */ |
1248 | if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) { | 1250 | if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) { |
1249 | ntfs_error(vi->i_sb, "Found mst protected attribute " | 1251 | ntfs_error(vi->i_sb, "Found mst protected attribute " |
@@ -1319,8 +1321,7 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi) | |||
1319 | "the mapping pairs array."); | 1321 | "the mapping pairs array."); |
1320 | goto unm_err_out; | 1322 | goto unm_err_out; |
1321 | } | 1323 | } |
1322 | if ((NInoCompressed(ni) || NInoSparse(ni)) && | 1324 | if (NInoCompressed(ni) || NInoSparse(ni)) { |
1323 | ni->type != AT_INDEX_ROOT) { | ||
1324 | if (a->data.non_resident.compression_unit != 4) { | 1325 | if (a->data.non_resident.compression_unit != 4) { |
1325 | ntfs_error(vi->i_sb, "Found nonstandard " | 1326 | ntfs_error(vi->i_sb, "Found nonstandard " |
1326 | "compression unit (%u instead " | 1327 | "compression unit (%u instead " |
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h index 609ad1728ce4..5c248d404f05 100644 --- a/fs/ntfs/layout.h +++ b/fs/ntfs/layout.h | |||
@@ -123,7 +123,7 @@ enum { | |||
123 | magic_RCRD = const_cpu_to_le32(0x44524352), /* Log record page. */ | 123 | magic_RCRD = const_cpu_to_le32(0x44524352), /* Log record page. */ |
124 | 124 | ||
125 | /* Found in $LogFile/$DATA. (May be found in $MFT/$DATA, also?) */ | 125 | /* Found in $LogFile/$DATA. (May be found in $MFT/$DATA, also?) */ |
126 | magic_CHKD = const_cpu_to_le32(0x424b4843), /* Modified by chkdsk. */ | 126 | magic_CHKD = const_cpu_to_le32(0x444b4843), /* Modified by chkdsk. */ |
127 | 127 | ||
128 | /* Found in all ntfs record containing records. */ | 128 | /* Found in all ntfs record containing records. */ |
129 | magic_BAAD = const_cpu_to_le32(0x44414142), /* Failed multi sector | 129 | magic_BAAD = const_cpu_to_le32(0x44414142), /* Failed multi sector |
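Annotation: the CHKD correction above follows from how these record magics are built: the four ASCII characters are packed little-endian, so 'C' (0x43) is the low byte and the high byte names the last character. The old high byte 0x42 therefore spelled "CHKB". A standalone check:

#include <stdio.h>
#include <stdint.h>

static uint32_t magic(char a, char b, char c, char d)
{
        return (uint32_t)a | (uint32_t)b << 8 |
               (uint32_t)c << 16 | (uint32_t)d << 24;
}

int main(void)
{
        printf("CHKD = 0x%08x\n", (unsigned)magic('C', 'H', 'K', 'D')); /* 0x444b4843 */
        printf("CHKB = 0x%08x\n", (unsigned)magic('C', 'H', 'K', 'B')); /* 0x424b4843 */
        return 0;
}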
@@ -308,10 +308,8 @@ typedef le16 MFT_RECORD_FLAGS; | |||
308 | * The _LE versions are to be applied on little endian MFT_REFs. | 308 | * The _LE versions are to be applied on little endian MFT_REFs. |
309 | * Note: The _LE versions will return a CPU endian formatted value! | 309 | * Note: The _LE versions will return a CPU endian formatted value! |
310 | */ | 310 | */ |
311 | typedef enum { | 311 | #define MFT_REF_MASK_CPU 0x0000ffffffffffffULL |
312 | MFT_REF_MASK_CPU = 0x0000ffffffffffffULL, | 312 | #define MFT_REF_MASK_LE const_cpu_to_le64(MFT_REF_MASK_CPU) |
313 | MFT_REF_MASK_LE = const_cpu_to_le64(0x0000ffffffffffffULL), | ||
314 | } MFT_REF_CONSTS; | ||
315 | 313 | ||
316 | typedef u64 MFT_REF; | 314 | typedef u64 MFT_REF; |
317 | typedef le64 leMFT_REF; | 315 | typedef le64 leMFT_REF; |
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c index 7b5934290685..5af3bf0b7eee 100644 --- a/fs/ntfs/lcnalloc.c +++ b/fs/ntfs/lcnalloc.c | |||
@@ -779,14 +779,13 @@ out: | |||
779 | 779 | ||
780 | /** | 780 | /** |
781 | * __ntfs_cluster_free - free clusters on an ntfs volume | 781 | * __ntfs_cluster_free - free clusters on an ntfs volume |
782 | * @vi: vfs inode whose runlist describes the clusters to free | 782 | * @ni: ntfs inode whose runlist describes the clusters to free |
783 | * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters | 783 | * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters |
784 | * @count: number of clusters to free or -1 for all clusters | 784 | * @count: number of clusters to free or -1 for all clusters |
785 | * @write_locked: true if the runlist is locked for writing | ||
786 | * @is_rollback: true if this is a rollback operation | 785 | * @is_rollback: true if this is a rollback operation |
787 | * | 786 | * |
788 | * Free @count clusters starting at the cluster @start_vcn in the runlist | 787 | * Free @count clusters starting at the cluster @start_vcn in the runlist |
789 | * described by the vfs inode @vi. | 788 | * described by the ntfs inode @ni. |
790 | * | 789 | * |
791 | * If @count is -1, all clusters from @start_vcn to the end of the runlist are | 790 | * If @count is -1, all clusters from @start_vcn to the end of the runlist are |
792 | * deallocated. Thus, to completely free all clusters in a runlist, use | 791 | * deallocated. Thus, to completely free all clusters in a runlist, use |
@@ -801,31 +800,28 @@ out: | |||
801 | * Return the number of deallocated clusters (not counting sparse ones) on | 800 | * Return the number of deallocated clusters (not counting sparse ones) on |
802 | * success and -errno on error. | 801 | * success and -errno on error. |
803 | * | 802 | * |
804 | * Locking: - The runlist described by @vi must be locked on entry and is | 803 | * Locking: - The runlist described by @ni must be locked for writing on entry |
805 | * locked on return. Note if the runlist is locked for reading the | 804 | * and is locked on return. Note the runlist may be modified when |
806 | * lock may be dropped and reacquired. Note the runlist may be | 805 | * needed runlist fragments need to be mapped. |
807 | * modified when needed runlist fragments need to be mapped. | ||
808 | * - The volume lcn bitmap must be unlocked on entry and is unlocked | 806 | * - The volume lcn bitmap must be unlocked on entry and is unlocked |
809 | * on return. | 807 | * on return. |
810 | * - This function takes the volume lcn bitmap lock for writing and | 808 | * - This function takes the volume lcn bitmap lock for writing and |
811 | * modifies the bitmap contents. | 809 | * modifies the bitmap contents. |
812 | */ | 810 | */ |
813 | s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, | 811 | s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count, |
814 | const BOOL write_locked, const BOOL is_rollback) | 812 | const BOOL is_rollback) |
815 | { | 813 | { |
816 | s64 delta, to_free, total_freed, real_freed; | 814 | s64 delta, to_free, total_freed, real_freed; |
817 | ntfs_inode *ni; | ||
818 | ntfs_volume *vol; | 815 | ntfs_volume *vol; |
819 | struct inode *lcnbmp_vi; | 816 | struct inode *lcnbmp_vi; |
820 | runlist_element *rl; | 817 | runlist_element *rl; |
821 | int err; | 818 | int err; |
822 | 819 | ||
823 | BUG_ON(!vi); | 820 | BUG_ON(!ni); |
824 | ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count " | 821 | ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count " |
825 | "0x%llx.%s", vi->i_ino, (unsigned long long)start_vcn, | 822 | "0x%llx.%s", ni->mft_no, (unsigned long long)start_vcn, |
826 | (unsigned long long)count, | 823 | (unsigned long long)count, |
827 | is_rollback ? " (rollback)" : ""); | 824 | is_rollback ? " (rollback)" : ""); |
828 | ni = NTFS_I(vi); | ||
829 | vol = ni->vol; | 825 | vol = ni->vol; |
830 | lcnbmp_vi = vol->lcnbmp_ino; | 826 | lcnbmp_vi = vol->lcnbmp_ino; |
831 | BUG_ON(!lcnbmp_vi); | 827 | BUG_ON(!lcnbmp_vi); |
@@ -843,7 +839,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, | |||
843 | 839 | ||
844 | total_freed = real_freed = 0; | 840 | total_freed = real_freed = 0; |
845 | 841 | ||
846 | rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, write_locked); | 842 | rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, TRUE); |
847 | if (IS_ERR(rl)) { | 843 | if (IS_ERR(rl)) { |
848 | if (!is_rollback) | 844 | if (!is_rollback) |
849 | ntfs_error(vol->sb, "Failed to find first runlist " | 845 | ntfs_error(vol->sb, "Failed to find first runlist " |
@@ -897,7 +893,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, | |||
897 | 893 | ||
898 | /* Attempt to map runlist. */ | 894 | /* Attempt to map runlist. */ |
899 | vcn = rl->vcn; | 895 | vcn = rl->vcn; |
900 | rl = ntfs_attr_find_vcn_nolock(ni, vcn, write_locked); | 896 | rl = ntfs_attr_find_vcn_nolock(ni, vcn, TRUE); |
901 | if (IS_ERR(rl)) { | 897 | if (IS_ERR(rl)) { |
902 | err = PTR_ERR(rl); | 898 | err = PTR_ERR(rl); |
903 | if (!is_rollback) | 899 | if (!is_rollback) |
@@ -965,8 +961,7 @@ err_out: | |||
965 | * If rollback fails, set the volume errors flag, emit an error | 961 | * If rollback fails, set the volume errors flag, emit an error |
966 | * message, and return the error code. | 962 | * message, and return the error code. |
967 | */ | 963 | */ |
968 | delta = __ntfs_cluster_free(vi, start_vcn, total_freed, write_locked, | 964 | delta = __ntfs_cluster_free(ni, start_vcn, total_freed, TRUE); |
969 | TRUE); | ||
970 | if (delta < 0) { | 965 | if (delta < 0) { |
971 | ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving " | 966 | ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving " |
972 | "inconsistent metadata! Unmount and run " | 967 | "inconsistent metadata! Unmount and run " |
diff --git a/fs/ntfs/lcnalloc.h b/fs/ntfs/lcnalloc.h index e4d7fb98d685..a6a8827882e7 100644 --- a/fs/ntfs/lcnalloc.h +++ b/fs/ntfs/lcnalloc.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * lcnalloc.h - Exports for NTFS kernel cluster (de)allocation. Part of the | 2 | * lcnalloc.h - Exports for NTFS kernel cluster (de)allocation. Part of the |
3 | * Linux-NTFS project. | 3 | * Linux-NTFS project. |
4 | * | 4 | * |
5 | * Copyright (c) 2004 Anton Altaparmakov | 5 | * Copyright (c) 2004-2005 Anton Altaparmakov |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License as published | 8 | * modify it under the terms of the GNU General Public License as published |
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
29 | 29 | ||
30 | #include "types.h" | 30 | #include "types.h" |
31 | #include "inode.h" | ||
31 | #include "runlist.h" | 32 | #include "runlist.h" |
32 | #include "volume.h" | 33 | #include "volume.h" |
33 | 34 | ||
@@ -42,18 +43,17 @@ extern runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, | |||
42 | const VCN start_vcn, const s64 count, const LCN start_lcn, | 43 | const VCN start_vcn, const s64 count, const LCN start_lcn, |
43 | const NTFS_CLUSTER_ALLOCATION_ZONES zone); | 44 | const NTFS_CLUSTER_ALLOCATION_ZONES zone); |
44 | 45 | ||
45 | extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, | 46 | extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, |
46 | s64 count, const BOOL write_locked, const BOOL is_rollback); | 47 | s64 count, const BOOL is_rollback); |
47 | 48 | ||
48 | /** | 49 | /** |
49 | * ntfs_cluster_free - free clusters on an ntfs volume | 50 | * ntfs_cluster_free - free clusters on an ntfs volume |
50 | * @vi: vfs inode whose runlist describes the clusters to free | 51 | * @ni: ntfs inode whose runlist describes the clusters to free |
51 | * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters | 52 | * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters |
52 | * @count: number of clusters to free or -1 for all clusters | 53 | * @count: number of clusters to free or -1 for all clusters |
53 | * @write_locked: true if the runlist is locked for writing | ||
54 | * | 54 | * |
55 | * Free @count clusters starting at the cluster @start_vcn in the runlist | 55 | * Free @count clusters starting at the cluster @start_vcn in the runlist |
56 | * described by the vfs inode @vi. | 56 | * described by the ntfs inode @ni. |
57 | * | 57 | * |
58 | * If @count is -1, all clusters from @start_vcn to the end of the runlist are | 58 | * If @count is -1, all clusters from @start_vcn to the end of the runlist are |
59 | * deallocated. Thus, to completely free all clusters in a runlist, use | 59 | * deallocated. Thus, to completely free all clusters in a runlist, use |
@@ -65,19 +65,18 @@ extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, | |||
65 | * Return the number of deallocated clusters (not counting sparse ones) on | 65 | * Return the number of deallocated clusters (not counting sparse ones) on |
66 | * success and -errno on error. | 66 | * success and -errno on error. |
67 | * | 67 | * |
68 | * Locking: - The runlist described by @vi must be locked on entry and is | 68 | * Locking: - The runlist described by @ni must be locked for writing on entry |
69 | * locked on return. Note if the runlist is locked for reading the | 69 | * and is locked on return. Note the runlist may be modified when |
70 | * lock may be dropped and reacquired. Note the runlist may be | 70 | * needed runlist fragments need to be mapped. |
71 | * modified when needed runlist fragments need to be mapped. | ||
72 | * - The volume lcn bitmap must be unlocked on entry and is unlocked | 71 | * - The volume lcn bitmap must be unlocked on entry and is unlocked |
73 | * on return. | 72 | * on return. |
74 | * - This function takes the volume lcn bitmap lock for writing and | 73 | * - This function takes the volume lcn bitmap lock for writing and |
75 | * modifies the bitmap contents. | 74 | * modifies the bitmap contents. |
76 | */ | 75 | */ |
77 | static inline s64 ntfs_cluster_free(struct inode *vi, const VCN start_vcn, | 76 | static inline s64 ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, |
78 | s64 count, const BOOL write_locked) | 77 | s64 count) |
79 | { | 78 | { |
80 | return __ntfs_cluster_free(vi, start_vcn, count, write_locked, FALSE); | 79 | return __ntfs_cluster_free(ni, start_vcn, count, FALSE); |
81 | } | 80 | } |
82 | 81 | ||
83 | extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, | 82 | extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, |
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c index 0173e95500d9..0fd70295cca6 100644 --- a/fs/ntfs/logfile.c +++ b/fs/ntfs/logfile.c | |||
@@ -51,7 +51,8 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, | |||
51 | RESTART_PAGE_HEADER *rp, s64 pos) | 51 | RESTART_PAGE_HEADER *rp, s64 pos) |
52 | { | 52 | { |
53 | u32 logfile_system_page_size, logfile_log_page_size; | 53 | u32 logfile_system_page_size, logfile_log_page_size; |
54 | u16 usa_count, usa_ofs, usa_end, ra_ofs; | 54 | u16 ra_ofs, usa_count, usa_ofs, usa_end = 0; |
55 | BOOL have_usa = TRUE; | ||
55 | 56 | ||
56 | ntfs_debug("Entering."); | 57 | ntfs_debug("Entering."); |
57 | /* | 58 | /* |
@@ -86,6 +87,14 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, | |||
86 | (int)sle16_to_cpu(rp->minor_ver)); | 87 | (int)sle16_to_cpu(rp->minor_ver)); |
87 | return FALSE; | 88 | return FALSE; |
88 | } | 89 | } |
90 | /* | ||
91 | * If chkdsk has been run the restart page may not be protected by an | ||
92 | * update sequence array. | ||
93 | */ | ||
94 | if (ntfs_is_chkd_record(rp->magic) && !le16_to_cpu(rp->usa_count)) { | ||
95 | have_usa = FALSE; | ||
96 | goto skip_usa_checks; | ||
97 | } | ||
89 | /* Verify the size of the update sequence array. */ | 98 | /* Verify the size of the update sequence array. */ |
90 | usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS); | 99 | usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS); |
91 | if (usa_count != le16_to_cpu(rp->usa_count)) { | 100 | if (usa_count != le16_to_cpu(rp->usa_count)) { |
@@ -102,6 +111,7 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, | |||
102 | "inconsistent update sequence array offset."); | 111 | "inconsistent update sequence array offset."); |
103 | return FALSE; | 112 | return FALSE; |
104 | } | 113 | } |
114 | skip_usa_checks: | ||
105 | /* | 115 | /* |
106 | * Verify the position of the restart area. It must be: | 116 | * Verify the position of the restart area. It must be: |
107 | * - aligned to 8-byte boundary, | 117 | * - aligned to 8-byte boundary, |
@@ -109,7 +119,8 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, | |||
109 | * - within the system page size. | 119 | * - within the system page size. |
110 | */ | 120 | */ |
111 | ra_ofs = le16_to_cpu(rp->restart_area_offset); | 121 | ra_ofs = le16_to_cpu(rp->restart_area_offset); |
112 | if (ra_ofs & 7 || ra_ofs < usa_end || | 122 | if (ra_ofs & 7 || (have_usa ? ra_ofs < usa_end : |
123 | ra_ofs < sizeof(RESTART_PAGE_HEADER)) || | ||
113 | ra_ofs > logfile_system_page_size) { | 124 | ra_ofs > logfile_system_page_size) { |
114 | ntfs_error(vi->i_sb, "$LogFile restart page specifies " | 125 | ntfs_error(vi->i_sb, "$LogFile restart page specifies " |
115 | "inconsistent restart area offset."); | 126 | "inconsistent restart area offset."); |
@@ -402,8 +413,12 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, | |||
402 | idx++; | 413 | idx++; |
403 | } while (to_read > 0); | 414 | } while (to_read > 0); |
404 | } | 415 | } |
405 | /* Perform the multi sector transfer deprotection on the buffer. */ | 416 | /* |
406 | if (post_read_mst_fixup((NTFS_RECORD*)trp, | 417 | * Perform the multi sector transfer deprotection on the buffer if the |
418 | * restart page is protected. | ||
419 | */ | ||
420 | if ((!ntfs_is_chkd_record(trp->magic) || le16_to_cpu(trp->usa_count)) | ||
421 | && post_read_mst_fixup((NTFS_RECORD*)trp, | ||
407 | le32_to_cpu(rp->system_page_size))) { | 422 | le32_to_cpu(rp->system_page_size))) { |
408 | /* | 423 | /* |
409 | * A multi sector tranfer error was detected. We only need to | 424 | * A multi sector tranfer error was detected. We only need to |
@@ -615,11 +630,16 @@ is_empty: | |||
615 | * Otherwise just throw it away. | 630 | * Otherwise just throw it away. |
616 | */ | 631 | */ |
617 | if (rstr2_lsn > rstr1_lsn) { | 632 | if (rstr2_lsn > rstr1_lsn) { |
633 | ntfs_debug("Using second restart page as it is more " | ||
634 | "recent."); | ||
618 | ntfs_free(rstr1_ph); | 635 | ntfs_free(rstr1_ph); |
619 | rstr1_ph = rstr2_ph; | 636 | rstr1_ph = rstr2_ph; |
620 | /* rstr1_lsn = rstr2_lsn; */ | 637 | /* rstr1_lsn = rstr2_lsn; */ |
621 | } else | 638 | } else { |
639 | ntfs_debug("Using first restart page as it is more " | ||
640 | "recent."); | ||
622 | ntfs_free(rstr2_ph); | 641 | ntfs_free(rstr2_ph); |
642 | } | ||
623 | rstr2_ph = NULL; | 643 | rstr2_ph = NULL; |
624 | } | 644 | } |
625 | /* All consistency checks passed. */ | 645 | /* All consistency checks passed. */ |
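Annotation: the logfile.c changes above hinge on a single predicate: a restart page whose magic is CHKD and whose usa_count is zero is treated as unprotected, so the update sequence array checks and the multi sector transfer deprotection are both skipped for it. A standalone sketch of that decision (deliberately simplified structure, not the real RESTART_PAGE_HEADER layout):

#include <stdio.h>
#include <stdint.h>

#define MAGIC_CHKD 0x444b4843u          /* "CHKD", little-endian packing */
#define MAGIC_RSTR 0x52545352u          /* "RSTR" */

struct restart_page { uint32_t magic; uint16_t usa_count; };

static int page_is_protected(const struct restart_page *rp)
{
        return !(rp->magic == MAGIC_CHKD && rp->usa_count == 0);
}

int main(void)
{
        struct restart_page chkd = { MAGIC_CHKD, 0 };   /* left behind by chkdsk */
        struct restart_page rstr = { MAGIC_RSTR, 9 };   /* 4096-byte system page */

        printf("chkd protected: %d\n", page_is_protected(&chkd));      /* 0 */
        printf("rstr protected: %d\n", page_is_protected(&rstr));      /* 1 */
        return 0;
}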
diff --git a/fs/ntfs/logfile.h b/fs/ntfs/logfile.h index 42388f95ea6d..a51f3dd0e9eb 100644 --- a/fs/ntfs/logfile.h +++ b/fs/ntfs/logfile.h | |||
@@ -113,7 +113,7 @@ typedef struct { | |||
113 | */ | 113 | */ |
114 | enum { | 114 | enum { |
115 | RESTART_VOLUME_IS_CLEAN = const_cpu_to_le16(0x0002), | 115 | RESTART_VOLUME_IS_CLEAN = const_cpu_to_le16(0x0002), |
116 | RESTART_SPACE_FILLER = 0xffff, /* gcc: Force enum bit width to 16. */ | 116 | RESTART_SPACE_FILLER = const_cpu_to_le16(0xffff), /* gcc: Force enum bit width to 16. */ |
117 | } __attribute__ ((__packed__)); | 117 | } __attribute__ ((__packed__)); |
118 | 118 | ||
119 | typedef le16 RESTART_AREA_FLAGS; | 119 | typedef le16 RESTART_AREA_FLAGS; |
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h index 3288bcc2c4aa..590887b943f5 100644 --- a/fs/ntfs/malloc.h +++ b/fs/ntfs/malloc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project. | 2 | * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2004 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
@@ -40,7 +40,7 @@ | |||
40 | * Depending on @gfp_mask the allocation may be guaranteed to succeed. | 40 | * Depending on @gfp_mask the allocation may be guaranteed to succeed. |
41 | */ | 41 | */ |
42 | static inline void *__ntfs_malloc(unsigned long size, | 42 | static inline void *__ntfs_malloc(unsigned long size, |
43 | unsigned int __nocast gfp_mask) | 43 | gfp_t gfp_mask) |
44 | { | 44 | { |
45 | if (likely(size <= PAGE_SIZE)) { | 45 | if (likely(size <= PAGE_SIZE)) { |
46 | BUG_ON(!size); | 46 | BUG_ON(!size); |
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 2c32b84385a8..b011369b5956 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c | |||
@@ -58,7 +58,8 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni) | |||
58 | * overflowing the unsigned long, but I don't think we would ever get | 58 | * overflowing the unsigned long, but I don't think we would ever get |
59 | * here if the volume was that big... | 59 | * here if the volume was that big... |
60 | */ | 60 | */ |
61 | index = ni->mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; | 61 | index = (u64)ni->mft_no << vol->mft_record_size_bits >> |
62 | PAGE_CACHE_SHIFT; | ||
62 | ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; | 63 | ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; |
63 | 64 | ||
64 | i_size = i_size_read(mft_vi); | 65 | i_size = i_size_read(mft_vi); |
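Annotation: the (u64) cast added above matters on 32-bit machines, where mft_no would otherwise be shifted as an unsigned long: the left shift by the record-size bits can overflow before the right shift by PAGE_CACHE_SHIFT ever happens. A standalone illustration with plausible but made-up numbers (1 KiB mft records, 4 KiB pages):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t mft_no = 0x00400000;   /* hypothetical, very large volume */
        unsigned rec_bits = 10, page_shift = 12;

        /* 32-bit arithmetic: the intermediate 0x100000000 wraps to 0... */
        uint32_t bad  = (mft_no << rec_bits) >> page_shift;
        /* ...while a 64-bit intermediate keeps the page index intact. */
        uint64_t good = ((uint64_t)mft_no << rec_bits) >> page_shift;

        printf("bad=%u good=%llu\n", (unsigned)bad, (unsigned long long)good);
        return 0;
}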
@@ -1953,7 +1954,7 @@ restore_undo_alloc: | |||
1953 | a = ctx->attr; | 1954 | a = ctx->attr; |
1954 | a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1); | 1955 | a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1); |
1955 | undo_alloc: | 1956 | undo_alloc: |
1956 | if (ntfs_cluster_free(vol->mft_ino, old_last_vcn, -1, TRUE) < 0) { | 1957 | if (ntfs_cluster_free(mft_ni, old_last_vcn, -1) < 0) { |
1957 | ntfs_error(vol->sb, "Failed to free clusters from mft data " | 1958 | ntfs_error(vol->sb, "Failed to free clusters from mft data " |
1958 | "attribute.%s", es); | 1959 | "attribute.%s", es); |
1959 | NVolSetErrors(vol); | 1960 | NVolSetErrors(vol); |
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c index f5b2ac929081..061b5ff6b73c 100644 --- a/fs/ntfs/runlist.c +++ b/fs/ntfs/runlist.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project. | 2 | * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2005 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * Copyright (c) 2002 Richard Russon | 5 | * Copyright (c) 2002-2005 Richard Russon |
6 | * | 6 | * |
7 | * This program/include file is free software; you can redistribute it and/or | 7 | * This program/include file is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License as published | 8 | * modify it under the terms of the GNU General Public License as published |
@@ -158,17 +158,21 @@ static inline BOOL ntfs_are_rl_mergeable(runlist_element *dst, | |||
158 | BUG_ON(!dst); | 158 | BUG_ON(!dst); |
159 | BUG_ON(!src); | 159 | BUG_ON(!src); |
160 | 160 | ||
161 | if ((dst->lcn < 0) || (src->lcn < 0)) { /* Are we merging holes? */ | 161 | /* We can merge unmapped regions even if they are misaligned. */ |
162 | if (dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE) | 162 | if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED)) |
163 | return TRUE; | 163 | return TRUE; |
164 | /* If the runs are misaligned, we cannot merge them. */ | ||
165 | if ((dst->vcn + dst->length) != src->vcn) | ||
164 | return FALSE; | 166 | return FALSE; |
165 | } | 167 | /* If both runs are non-sparse and contiguous, we can merge them. */ |
166 | if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */ | 168 | if ((dst->lcn >= 0) && (src->lcn >= 0) && |
167 | return FALSE; | 169 | ((dst->lcn + dst->length) == src->lcn)) |
168 | if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */ | 170 | return TRUE; |
169 | return FALSE; | 171 | /* If we are merging two holes, we can merge them. */ |
170 | 172 | if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE)) | |
171 | return TRUE; | 173 | return TRUE; |
174 | /* Cannot merge. */ | ||
175 | return FALSE; | ||
172 | } | 176 | } |
173 | 177 | ||
174 | /** | 178 | /** |
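Annotation: the rewritten ntfs_are_rl_mergeable() above boils down to a few rules: two runs merge if both are unmapped, or if they are VCN-adjacent and either LCN-contiguous or both holes. The standalone approximation below keeps the adjacency and contiguity rules and omits the unmapped special case (which the kernel version allows to merge even when misaligned):

#include <stdio.h>

typedef long long VCN;
typedef long long LCN;
#define LCN_HOLE (-1LL)

struct run { VCN vcn; LCN lcn; long long length; };

static int mergeable(const struct run *dst, const struct run *src)
{
        if (dst->vcn + dst->length != src->vcn)         /* must be VCN-adjacent */
                return 0;
        if (dst->lcn >= 0 && src->lcn >= 0 &&
            dst->lcn + dst->length == src->lcn)         /* contiguous real runs */
                return 1;
        return dst->lcn == LCN_HOLE && src->lcn == LCN_HOLE;    /* two holes */
}

int main(void)
{
        struct run a = {  0, 100, 16 };
        struct run b = { 16, 116,  8 };         /* adjacent and contiguous  */
        struct run c = { 24, 300,  4 };         /* adjacent, not contiguous */

        printf("a+b: %d  b+c: %d\n", mergeable(&a, &b), mergeable(&b, &c));
        return 0;
}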
@@ -214,14 +218,15 @@ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src) | |||
214 | static inline runlist_element *ntfs_rl_append(runlist_element *dst, | 218 | static inline runlist_element *ntfs_rl_append(runlist_element *dst, |
215 | int dsize, runlist_element *src, int ssize, int loc) | 219 | int dsize, runlist_element *src, int ssize, int loc) |
216 | { | 220 | { |
217 | BOOL right; | 221 | BOOL right = FALSE; /* Right end of @src needs merging. */ |
218 | int magic; | 222 | int marker; /* End of the inserted runs. */ |
219 | 223 | ||
220 | BUG_ON(!dst); | 224 | BUG_ON(!dst); |
221 | BUG_ON(!src); | 225 | BUG_ON(!src); |
222 | 226 | ||
223 | /* First, check if the right hand end needs merging. */ | 227 | /* First, check if the right hand end needs merging. */ |
224 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | 228 | if ((loc + 1) < dsize) |
229 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | ||
225 | 230 | ||
226 | /* Space required: @dst size + @src size, less one if we merged. */ | 231 | /* Space required: @dst size + @src size, less one if we merged. */ |
227 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); | 232 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right); |
@@ -236,18 +241,19 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst, | |||
236 | if (right) | 241 | if (right) |
237 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); | 242 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); |
238 | 243 | ||
239 | magic = loc + ssize; | 244 | /* First run after the @src runs that have been inserted. */ |
245 | marker = loc + ssize + 1; | ||
240 | 246 | ||
241 | /* Move the tail of @dst out of the way, then copy in @src. */ | 247 | /* Move the tail of @dst out of the way, then copy in @src. */ |
242 | ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right); | 248 | ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right)); |
243 | ntfs_rl_mc(dst, loc + 1, src, 0, ssize); | 249 | ntfs_rl_mc(dst, loc + 1, src, 0, ssize); |
244 | 250 | ||
245 | /* Adjust the size of the preceding hole. */ | 251 | /* Adjust the size of the preceding hole. */ |
246 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; | 252 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; |
247 | 253 | ||
248 | /* We may have changed the length of the file, so fix the end marker */ | 254 | /* We may have changed the length of the file, so fix the end marker */ |
249 | if (dst[magic + 1].lcn == LCN_ENOENT) | 255 | if (dst[marker].lcn == LCN_ENOENT) |
250 | dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length; | 256 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
251 | 257 | ||
252 | return dst; | 258 | return dst; |
253 | } | 259 | } |
@@ -279,18 +285,17 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst, | |||
279 | static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | 285 | static inline runlist_element *ntfs_rl_insert(runlist_element *dst, |
280 | int dsize, runlist_element *src, int ssize, int loc) | 286 | int dsize, runlist_element *src, int ssize, int loc) |
281 | { | 287 | { |
282 | BOOL left = FALSE; | 288 | BOOL left = FALSE; /* Left end of @src needs merging. */ |
283 | BOOL disc = FALSE; /* Discontinuity */ | 289 | BOOL disc = FALSE; /* Discontinuity between @dst and @src. */ |
284 | BOOL hole = FALSE; /* Following a hole */ | 290 | int marker; /* End of the inserted runs. */ |
285 | int magic; | ||
286 | 291 | ||
287 | BUG_ON(!dst); | 292 | BUG_ON(!dst); |
288 | BUG_ON(!src); | 293 | BUG_ON(!src); |
289 | 294 | ||
290 | /* disc => Discontinuity between the end of @dst and the start of @src. | 295 | /* |
291 | * This means we might need to insert a hole. | 296 | * disc => Discontinuity between the end of @dst and the start of @src. |
292 | * hole => @dst ends with a hole or an unmapped region which we can | 297 | * This means we might need to insert a "not mapped" run. |
293 | * extend to match the discontinuity. */ | 298 | */ |
294 | if (loc == 0) | 299 | if (loc == 0) |
295 | disc = (src[0].vcn > 0); | 300 | disc = (src[0].vcn > 0); |
296 | else { | 301 | else { |
@@ -303,58 +308,49 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | |||
303 | merged_length += src->length; | 308 | merged_length += src->length; |
304 | 309 | ||
305 | disc = (src[0].vcn > dst[loc - 1].vcn + merged_length); | 310 | disc = (src[0].vcn > dst[loc - 1].vcn + merged_length); |
306 | if (disc) | ||
307 | hole = (dst[loc - 1].lcn == LCN_HOLE); | ||
308 | } | 311 | } |
309 | 312 | /* | |
310 | /* Space required: @dst size + @src size, less one if we merged, plus | 313 | * Space required: @dst size + @src size, less one if we merged, plus |
311 | * one if there was a discontinuity, less one for a trailing hole. */ | 314 | * one if there was a discontinuity. |
312 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole); | 315 | */ |
316 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc); | ||
313 | if (IS_ERR(dst)) | 317 | if (IS_ERR(dst)) |
314 | return dst; | 318 | return dst; |
315 | /* | 319 | /* |
316 | * We are guaranteed to succeed from here so can start modifying the | 320 | * We are guaranteed to succeed from here so can start modifying the |
317 | * original runlist. | 321 | * original runlist. |
318 | */ | 322 | */ |
319 | |||
320 | if (left) | 323 | if (left) |
321 | __ntfs_rl_merge(dst + loc - 1, src); | 324 | __ntfs_rl_merge(dst + loc - 1, src); |
322 | 325 | /* | |
323 | magic = loc + ssize - left + disc - hole; | 326 | * First run after the @src runs that have been inserted. |
327 | * Nominally, @marker equals @loc + @ssize, i.e. location + number of | ||
328 | * runs in @src. However, if @left, then the first run in @src has | ||
329 | * been merged with one in @dst. And if @disc, then @dst and @src do | ||
330 | * not meet and we need an extra run to fill the gap. | ||
331 | */ | ||
332 | marker = loc + ssize - left + disc; | ||
324 | 333 | ||
325 | /* Move the tail of @dst out of the way, then copy in @src. */ | 334 | /* Move the tail of @dst out of the way, then copy in @src. */ |
326 | ntfs_rl_mm(dst, magic, loc, dsize - loc); | 335 | ntfs_rl_mm(dst, marker, loc, dsize - loc); |
327 | ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left); | 336 | ntfs_rl_mc(dst, loc + disc, src, left, ssize - left); |
328 | 337 | ||
329 | /* Adjust the VCN of the last run ... */ | 338 | /* Adjust the VCN of the first run after the insertion... */ |
330 | if (dst[magic].lcn <= LCN_HOLE) | 339 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
331 | dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length; | ||
332 | /* ... and the length. */ | 340 | /* ... and the length. */ |
333 | if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED) | 341 | if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED) |
334 | dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn; | 342 | dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn; |
335 | 343 | ||
336 | /* Writing beyond the end of the file and there's a discontinuity. */ | 344 | /* Writing beyond the end of the file and there is a discontinuity. */ |
337 | if (disc) { | 345 | if (disc) { |
338 | if (hole) | 346 | if (loc > 0) { |
339 | dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn; | 347 | dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length; |
340 | else { | 348 | dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn; |
341 | if (loc > 0) { | 349 | } else { |
342 | dst[loc].vcn = dst[loc - 1].vcn + | 350 | dst[loc].vcn = 0; |
343 | dst[loc - 1].length; | 351 | dst[loc].length = dst[loc + 1].vcn; |
344 | dst[loc].length = dst[loc + 1].vcn - | ||
345 | dst[loc].vcn; | ||
346 | } else { | ||
347 | dst[loc].vcn = 0; | ||
348 | dst[loc].length = dst[loc + 1].vcn; | ||
349 | } | ||
350 | dst[loc].lcn = LCN_RL_NOT_MAPPED; | ||
351 | } | 352 | } |
352 | 353 | dst[loc].lcn = LCN_RL_NOT_MAPPED; | |
353 | magic += hole; | ||
354 | |||
355 | if (dst[magic].lcn == LCN_ENOENT) | ||
356 | dst[magic].vcn = dst[magic - 1].vcn + | ||
357 | dst[magic - 1].length; | ||
358 | } | 354 | } |
359 | return dst; | 355 | return dst; |
360 | } | 356 | } |
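The comments added in the hunk above spell out the @marker arithmetic that replaces the old @magic/@hole bookkeeping in ntfs_rl_insert(). A standalone userspace illustration of that index math follows; the values of loc, ssize, left and disc are made up for the example and this is not kernel code:

/*
 * Standalone illustration of the @marker arithmetic used by
 * ntfs_rl_insert() above.  Plain ints stand in for runlist_element;
 * the numbers are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	int loc = 3;	/* insert position in @dst                        */
	int ssize = 4;	/* number of runs in @src                         */
	int left = 1;	/* 1: first @src run merged into dst[loc - 1]     */
	int disc = 1;	/* 1: gap between dst[loc - 1] and src[0], so an
			 * LCN_RL_NOT_MAPPED filler run is needed         */

	/* First run after everything copied in from @src. */
	int marker = loc + ssize - left + disc;

	/* The tail of @dst (old indices loc..dsize-1) moves up to @marker. */
	printf("tail of dst moves from index %d to index %d\n", loc, marker);
	/* @src (minus a merged first run) is copied in at loc + disc. */
	printf("src[%d..] is copied to dst[%d]\n", left, loc + disc);
	return 0;
}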
@@ -385,20 +381,23 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst, | |||
385 | static inline runlist_element *ntfs_rl_replace(runlist_element *dst, | 381 | static inline runlist_element *ntfs_rl_replace(runlist_element *dst, |
386 | int dsize, runlist_element *src, int ssize, int loc) | 382 | int dsize, runlist_element *src, int ssize, int loc) |
387 | { | 383 | { |
388 | BOOL left = FALSE; | 384 | BOOL left = FALSE; /* Left end of @src needs merging. */ |
389 | BOOL right; | 385 | BOOL right = FALSE; /* Right end of @src needs merging. */ |
390 | int magic; | 386 | int tail; /* Start of tail of @dst. */ |
387 | int marker; /* End of the inserted runs. */ | ||
391 | 388 | ||
392 | BUG_ON(!dst); | 389 | BUG_ON(!dst); |
393 | BUG_ON(!src); | 390 | BUG_ON(!src); |
394 | 391 | ||
395 | /* First, merge the left and right ends, if necessary. */ | 392 | /* First, see if the left and right ends need merging. */ |
396 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | 393 | if ((loc + 1) < dsize) |
394 | right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1); | ||
397 | if (loc > 0) | 395 | if (loc > 0) |
398 | left = ntfs_are_rl_mergeable(dst + loc - 1, src); | 396 | left = ntfs_are_rl_mergeable(dst + loc - 1, src); |
399 | 397 | /* | |
400 | /* Allocate some space. We'll need less if the left, right, or both | 398 | * Allocate some space. We will need less if the left, right, or both |
401 | * ends were merged. */ | 399 | * ends get merged. |
400 | */ | ||
402 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); | 401 | dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right); |
403 | if (IS_ERR(dst)) | 402 | if (IS_ERR(dst)) |
404 | return dst; | 403 | return dst; |
@@ -406,21 +405,37 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst, | |||
406 | * We are guaranteed to succeed from here so can start modifying the | 405 | * We are guaranteed to succeed from here so can start modifying the |
407 | * original runlists. | 406 | * original runlists. |
408 | */ | 407 | */ |
408 | |||
409 | /* First, merge the left and right ends, if necessary. */ | ||
409 | if (right) | 410 | if (right) |
410 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); | 411 | __ntfs_rl_merge(src + ssize - 1, dst + loc + 1); |
411 | if (left) | 412 | if (left) |
412 | __ntfs_rl_merge(dst + loc - 1, src); | 413 | __ntfs_rl_merge(dst + loc - 1, src); |
413 | 414 | /* | |
414 | /* FIXME: What does this mean? (AIA) */ | 415 | * Offset of the tail of @dst. This needs to be moved out of the way |
415 | magic = loc + ssize - left; | 416 | * to make space for the runs to be copied from @src, i.e. the first |
417 | * run of the tail of @dst. | ||
418 | * Nominally, @tail equals @loc + 1, i.e. location, skipping the | ||
419 | * replaced run. However, if @right, then one of @dst's runs is | ||
420 | * already merged into @src. | ||
421 | */ | ||
422 | tail = loc + right + 1; | ||
423 | /* | ||
424 | * First run after the @src runs that have been inserted, i.e. where | ||
425 | * the tail of @dst needs to be moved to. | ||
426 | * Nominally, @marker equals @loc + @ssize, i.e. location + number of | ||
427 | * runs in @src. However, if @left, then the first run in @src has | ||
428 | * been merged with one in @dst. | ||
429 | */ | ||
430 | marker = loc + ssize - left; | ||
416 | 431 | ||
417 | /* Move the tail of @dst out of the way, then copy in @src. */ | 432 | /* Move the tail of @dst out of the way, then copy in @src. */ |
418 | ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1); | 433 | ntfs_rl_mm(dst, marker, tail, dsize - tail); |
419 | ntfs_rl_mc(dst, loc, src, left, ssize - left); | 434 | ntfs_rl_mc(dst, loc, src, left, ssize - left); |
420 | 435 | ||
421 | /* We may have changed the length of the file, so fix the end marker */ | 436 | /* We may have changed the length of the file, so fix the end marker. */ |
422 | if (dst[magic].lcn == LCN_ENOENT) | 437 | if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT) |
423 | dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length; | 438 | dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length; |
424 | return dst; | 439 | return dst; |
425 | } | 440 | } |
426 | 441 | ||
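The matching cleanup in ntfs_rl_replace() above introduces @tail (the first preserved run of @dst) alongside @marker, plus the new dsize - tail > 0 guard before the end marker is fixed up. A rough userspace sketch of that bookkeeping, again with invented values:

/*
 * Standalone sketch of the @tail/@marker bookkeeping in
 * ntfs_rl_replace() above (hypothetical values, not kernel code).
 */
#include <stdio.h>

int main(void)
{
	int loc = 2;	/* run in @dst being replaced                   */
	int dsize = 6;	/* runs in @dst                                 */
	int ssize = 3;	/* runs in @src                                 */
	int left = 0;	/* 1: src[0] merged into dst[loc - 1]           */
	int right = 1;	/* 1: src[ssize - 1] merged with dst[loc + 1]   */

	int tail = loc + right + 1;	 /* first preserved run of @dst */
	int marker = loc + ssize - left; /* where that tail ends up     */

	printf("move dst[%d..%d] to dst[%d]\n", tail, dsize - 1, marker);
	printf("copy src[%d..%d] to dst[%d]\n", left, ssize - 1, loc);

	/* Only touch dst[marker] if a tail was actually moved there. */
	if (dsize - tail > 0)
		printf("fix vcn of dst[%d] if it is the LCN_ENOENT end marker\n",
		       marker);
	return 0;
}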
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c index a389a5a16c84..0ea887fc859c 100644 --- a/fs/ntfs/unistr.c +++ b/fs/ntfs/unistr.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. | 2 | * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2004 Anton Altaparmakov | 4 | * Copyright (c) 2001-2005 Anton Altaparmakov |
5 | * | 5 | * |
6 | * This program/include file is free software; you can redistribute it and/or | 6 | * This program/include file is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as published | 7 | * modify it under the terms of the GNU General Public License as published |
diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 296480e96dd5..6c8dcf7613fd 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c | |||
@@ -35,7 +35,7 @@ EXPORT_SYMBOL(posix_acl_permission); | |||
35 | * Allocate a new ACL with the specified number of entries. | 35 | * Allocate a new ACL with the specified number of entries. |
36 | */ | 36 | */ |
37 | struct posix_acl * | 37 | struct posix_acl * |
38 | posix_acl_alloc(int count, unsigned int __nocast flags) | 38 | posix_acl_alloc(int count, gfp_t flags) |
39 | { | 39 | { |
40 | const size_t size = sizeof(struct posix_acl) + | 40 | const size_t size = sizeof(struct posix_acl) + |
41 | count * sizeof(struct posix_acl_entry); | 41 | count * sizeof(struct posix_acl_entry); |
@@ -51,7 +51,7 @@ posix_acl_alloc(int count, unsigned int __nocast flags) | |||
51 | * Clone an ACL. | 51 | * Clone an ACL. |
52 | */ | 52 | */ |
53 | struct posix_acl * | 53 | struct posix_acl * |
54 | posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags) | 54 | posix_acl_clone(const struct posix_acl *acl, gfp_t flags) |
55 | { | 55 | { |
56 | struct posix_acl *clone = NULL; | 56 | struct posix_acl *clone = NULL; |
57 | 57 | ||
@@ -185,7 +185,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p) | |||
185 | * Create an ACL representing the file mode permission bits of an inode. | 185 | * Create an ACL representing the file mode permission bits of an inode. |
186 | */ | 186 | */ |
187 | struct posix_acl * | 187 | struct posix_acl * |
188 | posix_acl_from_mode(mode_t mode, unsigned int __nocast flags) | 188 | posix_acl_from_mode(mode_t mode, gfp_t flags) |
189 | { | 189 | { |
190 | struct posix_acl *acl = posix_acl_alloc(3, flags); | 190 | struct posix_acl *acl = posix_acl_alloc(3, flags); |
191 | if (!acl) | 191 | if (!acl) |
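The posix_acl.c hunks above (and the xfs/linux-2.6/kmem changes later in this diff) replace the "unsigned int __nocast" annotation with the dedicated gfp_t type for allocation flags, so sparse can flag callers that pass something other than GFP_* values. A userspace stand-in sketch of the idea; the gfp_t typedef, the GFP_KERNEL value and the stub names below are placeholders, the real definitions live in the kernel headers:

/*
 * Userspace stand-in: the allocation-flag argument gets its own type,
 * so a static checker can catch callers passing unrelated integers.
 */
#include <stdlib.h>

typedef unsigned int gfp_t;		/* stand-in for the kernel typedef */

#define GFP_KERNEL	((gfp_t)0x10u)	/* placeholder value, not the kernel's */

struct posix_acl_stub {
	int a_count;
};

/* Same shape as posix_acl_alloc(count, flags) after the conversion. */
static struct posix_acl_stub *acl_alloc_stub(int count, gfp_t flags)
{
	struct posix_acl_stub *acl = malloc(sizeof(*acl));

	(void)flags;			/* userspace stand-in: flags unused */
	if (acl)
		acl->a_count = count;
	return acl;
}

int main(void)
{
	struct posix_acl_stub *acl = acl_alloc_stub(3, GFP_KERNEL);

	free(acl);
	return 0;
}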
diff --git a/fs/proc/array.c b/fs/proc/array.c index d88d518d30f6..d84eecacbeaf 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -74,6 +74,7 @@ | |||
74 | #include <linux/file.h> | 74 | #include <linux/file.h> |
75 | #include <linux/times.h> | 75 | #include <linux/times.h> |
76 | #include <linux/cpuset.h> | 76 | #include <linux/cpuset.h> |
77 | #include <linux/rcupdate.h> | ||
77 | 78 | ||
78 | #include <asm/uaccess.h> | 79 | #include <asm/uaccess.h> |
79 | #include <asm/pgtable.h> | 80 | #include <asm/pgtable.h> |
@@ -180,12 +181,14 @@ static inline char * task_state(struct task_struct *p, char *buffer) | |||
180 | p->gid, p->egid, p->sgid, p->fsgid); | 181 | p->gid, p->egid, p->sgid, p->fsgid); |
181 | read_unlock(&tasklist_lock); | 182 | read_unlock(&tasklist_lock); |
182 | task_lock(p); | 183 | task_lock(p); |
184 | rcu_read_lock(); | ||
183 | if (p->files) | 185 | if (p->files) |
184 | fdt = files_fdtable(p->files); | 186 | fdt = files_fdtable(p->files); |
185 | buffer += sprintf(buffer, | 187 | buffer += sprintf(buffer, |
186 | "FDSize:\t%d\n" | 188 | "FDSize:\t%d\n" |
187 | "Groups:\t", | 189 | "Groups:\t", |
188 | fdt ? fdt->max_fds : 0); | 190 | fdt ? fdt->max_fds : 0); |
191 | rcu_read_unlock(); | ||
189 | 192 | ||
190 | group_info = p->group_info; | 193 | group_info = p->group_info; |
191 | get_group_info(group_info); | 194 | get_group_info(group_info); |
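The array.c hunk above brackets the files_fdtable() dereference with rcu_read_lock()/rcu_read_unlock(), since the fdtable can be freed via RCU once the table is resized. A sketch of the resulting access pattern, using only the calls visible in the hunk; this is the shape of the locking, not a drop-in kernel function:

/*
 * Any dereference of files_fdtable() must sit inside an RCU read-side
 * critical section, with the task's files pinned by task_lock().
 */
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/rcupdate.h>

static int task_max_fds(struct task_struct *p)
{
	struct fdtable *fdt;
	int max_fds = 0;

	task_lock(p);			/* pins p->files            */
	rcu_read_lock();		/* pins the fdtable itself  */
	if (p->files) {
		fdt = files_fdtable(p->files);
		max_fds = fdt->max_fds;
	}
	rcu_read_unlock();
	task_unlock(p);
	return max_fds;
}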
diff --git a/fs/proc/base.c b/fs/proc/base.c index 23db452ab428..a170450aadb1 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -103,7 +103,9 @@ enum pid_directory_inos { | |||
103 | PROC_TGID_NUMA_MAPS, | 103 | PROC_TGID_NUMA_MAPS, |
104 | PROC_TGID_MOUNTS, | 104 | PROC_TGID_MOUNTS, |
105 | PROC_TGID_WCHAN, | 105 | PROC_TGID_WCHAN, |
106 | #ifdef CONFIG_MMU | ||
106 | PROC_TGID_SMAPS, | 107 | PROC_TGID_SMAPS, |
108 | #endif | ||
107 | #ifdef CONFIG_SCHEDSTATS | 109 | #ifdef CONFIG_SCHEDSTATS |
108 | PROC_TGID_SCHEDSTAT, | 110 | PROC_TGID_SCHEDSTAT, |
109 | #endif | 111 | #endif |
@@ -141,7 +143,9 @@ enum pid_directory_inos { | |||
141 | PROC_TID_NUMA_MAPS, | 143 | PROC_TID_NUMA_MAPS, |
142 | PROC_TID_MOUNTS, | 144 | PROC_TID_MOUNTS, |
143 | PROC_TID_WCHAN, | 145 | PROC_TID_WCHAN, |
146 | #ifdef CONFIG_MMU | ||
144 | PROC_TID_SMAPS, | 147 | PROC_TID_SMAPS, |
148 | #endif | ||
145 | #ifdef CONFIG_SCHEDSTATS | 149 | #ifdef CONFIG_SCHEDSTATS |
146 | PROC_TID_SCHEDSTAT, | 150 | PROC_TID_SCHEDSTAT, |
147 | #endif | 151 | #endif |
@@ -195,7 +199,9 @@ static struct pid_entry tgid_base_stuff[] = { | |||
195 | E(PROC_TGID_ROOT, "root", S_IFLNK|S_IRWXUGO), | 199 | E(PROC_TGID_ROOT, "root", S_IFLNK|S_IRWXUGO), |
196 | E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO), | 200 | E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO), |
197 | E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO), | 201 | E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO), |
202 | #ifdef CONFIG_MMU | ||
198 | E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUGO), | 203 | E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUGO), |
204 | #endif | ||
199 | #ifdef CONFIG_SECURITY | 205 | #ifdef CONFIG_SECURITY |
200 | E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO), | 206 | E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO), |
201 | #endif | 207 | #endif |
@@ -235,7 +241,9 @@ static struct pid_entry tid_base_stuff[] = { | |||
235 | E(PROC_TID_ROOT, "root", S_IFLNK|S_IRWXUGO), | 241 | E(PROC_TID_ROOT, "root", S_IFLNK|S_IRWXUGO), |
236 | E(PROC_TID_EXE, "exe", S_IFLNK|S_IRWXUGO), | 242 | E(PROC_TID_EXE, "exe", S_IFLNK|S_IRWXUGO), |
237 | E(PROC_TID_MOUNTS, "mounts", S_IFREG|S_IRUGO), | 243 | E(PROC_TID_MOUNTS, "mounts", S_IFREG|S_IRUGO), |
244 | #ifdef CONFIG_MMU | ||
238 | E(PROC_TID_SMAPS, "smaps", S_IFREG|S_IRUGO), | 245 | E(PROC_TID_SMAPS, "smaps", S_IFREG|S_IRUGO), |
246 | #endif | ||
239 | #ifdef CONFIG_SECURITY | 247 | #ifdef CONFIG_SECURITY |
240 | E(PROC_TID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO), | 248 | E(PROC_TID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO), |
241 | #endif | 249 | #endif |
@@ -340,6 +348,54 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf | |||
340 | return result; | 348 | return result; |
341 | } | 349 | } |
342 | 350 | ||
351 | |||
352 | /* Same as proc_root_link, but this additionally tries to get fs from other | ||
353 | * threads in the group */ | ||
354 | static int proc_task_root_link(struct inode *inode, struct dentry **dentry, | ||
355 | struct vfsmount **mnt) | ||
356 | { | ||
357 | struct fs_struct *fs; | ||
358 | int result = -ENOENT; | ||
359 | struct task_struct *leader = proc_task(inode); | ||
360 | |||
361 | task_lock(leader); | ||
362 | fs = leader->fs; | ||
363 | if (fs) { | ||
364 | atomic_inc(&fs->count); | ||
365 | task_unlock(leader); | ||
366 | } else { | ||
367 | /* Try to get fs from other threads */ | ||
368 | task_unlock(leader); | ||
369 | read_lock(&tasklist_lock); | ||
370 | if (pid_alive(leader)) { | ||
371 | struct task_struct *task = leader; | ||
372 | |||
373 | while ((task = next_thread(task)) != leader) { | ||
374 | task_lock(task); | ||
375 | fs = task->fs; | ||
376 | if (fs) { | ||
377 | atomic_inc(&fs->count); | ||
378 | task_unlock(task); | ||
379 | break; | ||
380 | } | ||
381 | task_unlock(task); | ||
382 | } | ||
383 | } | ||
384 | read_unlock(&tasklist_lock); | ||
385 | } | ||
386 | |||
387 | if (fs) { | ||
388 | read_lock(&fs->lock); | ||
389 | *mnt = mntget(fs->rootmnt); | ||
390 | *dentry = dget(fs->root); | ||
391 | read_unlock(&fs->lock); | ||
392 | result = 0; | ||
393 | put_fs_struct(fs); | ||
394 | } | ||
395 | return result; | ||
396 | } | ||
397 | |||
398 | |||
343 | #define MAY_PTRACE(task) \ | 399 | #define MAY_PTRACE(task) \ |
344 | (task == current || \ | 400 | (task == current || \ |
345 | (task->parent == current && \ | 401 | (task->parent == current && \ |
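proc_task_root_link() above pins whichever fs_struct it finds by bumping fs->count under task_lock() and drops it with put_fs_struct() once the root dentry and vfsmount have been referenced. A condensed sketch of that get/use/put pattern; header names are approximate for this tree and the helper name is invented for illustration:

/*
 * Take a reference on a task's fs_struct, grab its root, drop the
 * reference.  Mirrors the single-task case of the helper above.
 */
#include <linux/sched.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/dcache.h>
#include <linux/errno.h>

static int grab_task_root(struct task_struct *task,
			  struct dentry **dentry, struct vfsmount **mnt)
{
	struct fs_struct *fs;

	task_lock(task);
	fs = task->fs;
	if (fs)
		atomic_inc(&fs->count);	/* take a reference under task_lock */
	task_unlock(task);
	if (!fs)
		return -ENOENT;

	read_lock(&fs->lock);
	*mnt = mntget(fs->rootmnt);
	*dentry = dget(fs->root);
	read_unlock(&fs->lock);
	put_fs_struct(fs);		/* drop the reference taken above */
	return 0;
}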
@@ -471,14 +527,14 @@ static int proc_oom_score(struct task_struct *task, char *buffer) | |||
471 | 527 | ||
472 | /* permission checks */ | 528 | /* permission checks */ |
473 | 529 | ||
474 | static int proc_check_root(struct inode *inode) | 530 | /* If the process being read is separated by chroot from the reading process, |
531 | * don't let the reader access the threads. | ||
532 | */ | ||
533 | static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt) | ||
475 | { | 534 | { |
476 | struct dentry *de, *base, *root; | 535 | struct dentry *de, *base; |
477 | struct vfsmount *our_vfsmnt, *vfsmnt, *mnt; | 536 | struct vfsmount *our_vfsmnt, *mnt; |
478 | int res = 0; | 537 | int res = 0; |
479 | |||
480 | if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */ | ||
481 | return -ENOENT; | ||
482 | read_lock(¤t->fs->lock); | 538 | read_lock(¤t->fs->lock); |
483 | our_vfsmnt = mntget(current->fs->rootmnt); | 539 | our_vfsmnt = mntget(current->fs->rootmnt); |
484 | base = dget(current->fs->root); | 540 | base = dget(current->fs->root); |
@@ -511,6 +567,16 @@ out: | |||
511 | goto exit; | 567 | goto exit; |
512 | } | 568 | } |
513 | 569 | ||
570 | static int proc_check_root(struct inode *inode) | ||
571 | { | ||
572 | struct dentry *root; | ||
573 | struct vfsmount *vfsmnt; | ||
574 | |||
575 | if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */ | ||
576 | return -ENOENT; | ||
577 | return proc_check_chroot(root, vfsmnt); | ||
578 | } | ||
579 | |||
514 | static int proc_permission(struct inode *inode, int mask, struct nameidata *nd) | 580 | static int proc_permission(struct inode *inode, int mask, struct nameidata *nd) |
515 | { | 581 | { |
516 | if (generic_permission(inode, mask, NULL) != 0) | 582 | if (generic_permission(inode, mask, NULL) != 0) |
@@ -518,6 +584,20 @@ static int proc_permission(struct inode *inode, int mask, struct nameidata *nd) | |||
518 | return proc_check_root(inode); | 584 | return proc_check_root(inode); |
519 | } | 585 | } |
520 | 586 | ||
587 | static int proc_task_permission(struct inode *inode, int mask, struct nameidata *nd) | ||
588 | { | ||
589 | struct dentry *root; | ||
590 | struct vfsmount *vfsmnt; | ||
591 | |||
592 | if (generic_permission(inode, mask, NULL) != 0) | ||
593 | return -EACCES; | ||
594 | |||
595 | if (proc_task_root_link(inode, &root, &vfsmnt)) | ||
596 | return -ENOENT; | ||
597 | |||
598 | return proc_check_chroot(root, vfsmnt); | ||
599 | } | ||
600 | |||
521 | extern struct seq_operations proc_pid_maps_op; | 601 | extern struct seq_operations proc_pid_maps_op; |
522 | static int maps_open(struct inode *inode, struct file *file) | 602 | static int maps_open(struct inode *inode, struct file *file) |
523 | { | 603 | { |
@@ -558,6 +638,7 @@ static struct file_operations proc_numa_maps_operations = { | |||
558 | }; | 638 | }; |
559 | #endif | 639 | #endif |
560 | 640 | ||
641 | #ifdef CONFIG_MMU | ||
561 | extern struct seq_operations proc_pid_smaps_op; | 642 | extern struct seq_operations proc_pid_smaps_op; |
562 | static int smaps_open(struct inode *inode, struct file *file) | 643 | static int smaps_open(struct inode *inode, struct file *file) |
563 | { | 644 | { |
@@ -576,6 +657,7 @@ static struct file_operations proc_smaps_operations = { | |||
576 | .llseek = seq_lseek, | 657 | .llseek = seq_lseek, |
577 | .release = seq_release, | 658 | .release = seq_release, |
578 | }; | 659 | }; |
660 | #endif | ||
579 | 661 | ||
580 | extern struct seq_operations mounts_op; | 662 | extern struct seq_operations mounts_op; |
581 | static int mounts_open(struct inode *inode, struct file *file) | 663 | static int mounts_open(struct inode *inode, struct file *file) |
@@ -1419,7 +1501,7 @@ static struct inode_operations proc_fd_inode_operations = { | |||
1419 | 1501 | ||
1420 | static struct inode_operations proc_task_inode_operations = { | 1502 | static struct inode_operations proc_task_inode_operations = { |
1421 | .lookup = proc_task_lookup, | 1503 | .lookup = proc_task_lookup, |
1422 | .permission = proc_permission, | 1504 | .permission = proc_task_permission, |
1423 | }; | 1505 | }; |
1424 | 1506 | ||
1425 | #ifdef CONFIG_SECURITY | 1507 | #ifdef CONFIG_SECURITY |
@@ -1609,10 +1691,12 @@ static struct dentry *proc_pident_lookup(struct inode *dir, | |||
1609 | case PROC_TGID_MOUNTS: | 1691 | case PROC_TGID_MOUNTS: |
1610 | inode->i_fop = &proc_mounts_operations; | 1692 | inode->i_fop = &proc_mounts_operations; |
1611 | break; | 1693 | break; |
1694 | #ifdef CONFIG_MMU | ||
1612 | case PROC_TID_SMAPS: | 1695 | case PROC_TID_SMAPS: |
1613 | case PROC_TGID_SMAPS: | 1696 | case PROC_TGID_SMAPS: |
1614 | inode->i_fop = &proc_smaps_operations; | 1697 | inode->i_fop = &proc_smaps_operations; |
1615 | break; | 1698 | break; |
1699 | #endif | ||
1616 | #ifdef CONFIG_SECURITY | 1700 | #ifdef CONFIG_SECURITY |
1617 | case PROC_TID_ATTR: | 1701 | case PROC_TID_ATTR: |
1618 | inode->i_nlink = 2; | 1702 | inode->i_nlink = 2; |
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index f3bf016d5ee3..cff10ab1af63 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c | |||
@@ -91,6 +91,7 @@ static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos) | |||
91 | next = _rb; | 91 | next = _rb; |
92 | break; | 92 | break; |
93 | } | 93 | } |
94 | pos--; | ||
94 | } | 95 | } |
95 | 96 | ||
96 | return next; | 97 | return next; |
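The one-line pos-- fix above makes nommu_vma_list_start() actually consume one VMA per requested position; without it the walk returns the first entry no matter what *_pos asks for. A tiny userspace illustration of that seq_file-style positional walk:

/*
 * Walk a list to the entry at position pos, consuming one entry per
 * step.  Omitting the pos-- is the bug the hunk above fixes.
 */
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Return the entry at position pos, or NULL if the list is shorter. */
static struct node *walk_to(struct node *head, long pos)
{
	struct node *n;

	for (n = head; n; n = n->next) {
		if (pos == 0)
			return n;	/* found the requested position */
		pos--;			/* the line the fix adds        */
	}
	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("pos 2 -> %d\n", walk_to(&a, 2)->val);	/* prints 3 */
	return 0;
}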
diff --git a/fs/read_write.c b/fs/read_write.c index b60324aaa2b6..a091ee4f430d 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -499,6 +499,9 @@ static ssize_t do_readv_writev(int type, struct file *file, | |||
499 | ret = rw_verify_area(type, file, pos, tot_len); | 499 | ret = rw_verify_area(type, file, pos, tot_len); |
500 | if (ret) | 500 | if (ret) |
501 | goto out; | 501 | goto out; |
502 | ret = security_file_permission(file, type == READ ? MAY_READ : MAY_WRITE); | ||
503 | if (ret) | ||
504 | goto out; | ||
502 | 505 | ||
503 | fnv = NULL; | 506 | fnv = NULL; |
504 | if (type == READ) { | 507 | if (type == READ) { |
diff --git a/fs/relayfs/buffers.c b/fs/relayfs/buffers.c index 2aa8e2719999..84e21ffa5ca8 100644 --- a/fs/relayfs/buffers.c +++ b/fs/relayfs/buffers.c | |||
@@ -109,7 +109,7 @@ static void *relay_alloc_buf(struct rchan_buf *buf, unsigned long size) | |||
109 | if (unlikely(!buf->page_array[i])) | 109 | if (unlikely(!buf->page_array[i])) |
110 | goto depopulate; | 110 | goto depopulate; |
111 | } | 111 | } |
112 | mem = vmap(buf->page_array, n_pages, GFP_KERNEL, PAGE_KERNEL); | 112 | mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL); |
113 | if (!mem) | 113 | if (!mem) |
114 | goto depopulate; | 114 | goto depopulate; |
115 | 115 | ||
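The relayfs fix above corrects a mixed-up flag namespace: GFP_* values belong to page allocation, while vmap()'s third parameter takes VM_* mapping flags such as VM_MAP. A kernel-style sketch of the corrected allocate-then-map sequence; the helper name is made up and error handling is minimal:

/*
 * Allocate n_pages individual pages (GFP flags), then map them into a
 * contiguous kernel virtual range (VM_* flags), as the fix above does.
 * The caller unmaps with vunmap(mem) and frees the pages when done.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_n_pages(struct page **pages, unsigned int n_pages)
{
	unsigned int i;
	void *mem;

	for (i = 0; i < n_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);	/* GFP flags here  */
		if (!pages[i])
			goto depopulate;
	}
	mem = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);/* VM_* flags here */
	if (!mem)
		goto depopulate;
	return mem;

depopulate:
	while (i--)
		__free_page(pages[i]);
	return NULL;
}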
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c index 4b184559f231..d2653b589b1c 100644 --- a/fs/xfs/linux-2.6/kmem.c +++ b/fs/xfs/linux-2.6/kmem.c | |||
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | 46 | ||
47 | void * | 47 | void * |
48 | kmem_alloc(size_t size, unsigned int __nocast flags) | 48 | kmem_alloc(size_t size, gfp_t flags) |
49 | { | 49 | { |
50 | int retries = 0; | 50 | int retries = 0; |
51 | unsigned int lflags = kmem_flags_convert(flags); | 51 | unsigned int lflags = kmem_flags_convert(flags); |
@@ -67,7 +67,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags) | |||
67 | } | 67 | } |
68 | 68 | ||
69 | void * | 69 | void * |
70 | kmem_zalloc(size_t size, unsigned int __nocast flags) | 70 | kmem_zalloc(size_t size, gfp_t flags) |
71 | { | 71 | { |
72 | void *ptr; | 72 | void *ptr; |
73 | 73 | ||
@@ -90,7 +90,7 @@ kmem_free(void *ptr, size_t size) | |||
90 | 90 | ||
91 | void * | 91 | void * |
92 | kmem_realloc(void *ptr, size_t newsize, size_t oldsize, | 92 | kmem_realloc(void *ptr, size_t newsize, size_t oldsize, |
93 | unsigned int __nocast flags) | 93 | gfp_t flags) |
94 | { | 94 | { |
95 | void *new; | 95 | void *new; |
96 | 96 | ||
@@ -105,7 +105,7 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize, | |||
105 | } | 105 | } |
106 | 106 | ||
107 | void * | 107 | void * |
108 | kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) | 108 | kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags) |
109 | { | 109 | { |
110 | int retries = 0; | 110 | int retries = 0; |
111 | unsigned int lflags = kmem_flags_convert(flags); | 111 | unsigned int lflags = kmem_flags_convert(flags); |
@@ -124,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) | |||
124 | } | 124 | } |
125 | 125 | ||
126 | void * | 126 | void * |
127 | kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags) | 127 | kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags) |
128 | { | 128 | { |
129 | void *ptr; | 129 | void *ptr; |
130 | 130 | ||
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index 109fcf27e256..ee7010f085bc 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h | |||
@@ -81,7 +81,7 @@ typedef unsigned long xfs_pflags_t; | |||
81 | *(NSTATEP) = *(OSTATEP); \ | 81 | *(NSTATEP) = *(OSTATEP); \ |
82 | } while (0) | 82 | } while (0) |
83 | 83 | ||
84 | static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags) | 84 | static __inline unsigned int kmem_flags_convert(gfp_t flags) |
85 | { | 85 | { |
86 | unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ | 86 | unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ |
87 | 87 | ||
@@ -125,13 +125,12 @@ kmem_zone_destroy(kmem_zone_t *zone) | |||
125 | BUG(); | 125 | BUG(); |
126 | } | 126 | } |
127 | 127 | ||
128 | extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); | 128 | extern void *kmem_zone_zalloc(kmem_zone_t *, gfp_t); |
129 | extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); | 129 | extern void *kmem_zone_alloc(kmem_zone_t *, gfp_t); |
130 | 130 | ||
131 | extern void *kmem_alloc(size_t, unsigned int __nocast); | 131 | extern void *kmem_alloc(size_t, gfp_t); |
132 | extern void *kmem_realloc(void *, size_t, size_t, | 132 | extern void *kmem_realloc(void *, size_t, size_t, gfp_t); |
133 | unsigned int __nocast); | 133 | extern void *kmem_zalloc(size_t, gfp_t); |
134 | extern void *kmem_zalloc(size_t, unsigned int __nocast); | ||
135 | extern void kmem_free(void *, size_t); | 134 | extern void kmem_free(void *, size_t); |
136 | 135 | ||
137 | typedef struct shrinker *kmem_shaker_t; | 136 | typedef struct shrinker *kmem_shaker_t; |