Diffstat (limited to 'fs/ntfs/attrib.c'):

 fs/ntfs/attrib.c | 232 ++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 201 insertions(+), 31 deletions(-)

diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 3f9a4ff42ee5..b194197b72f7 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -36,9 +36,27 @@
  * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
  * @ni:		ntfs inode for which to map (part of) a runlist
  * @vcn:	map runlist part containing this vcn
+ * @ctx:	active attribute search context if present or NULL if not
  *
  * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
  *
+ * If @ctx is specified, it is an active search context of @ni and its base mft
+ * record.  This is needed when ntfs_map_runlist_nolock() encounters unmapped
+ * runlist fragments and allows their mapping.  If you do not have the mft
+ * record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
+ * will perform the necessary mapping and unmapping.
+ *
+ * Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
+ * restores it before returning.  Thus, @ctx will be left pointing to the same
+ * attribute on return as on entry.  However, the actual pointers in @ctx may
+ * point to different memory locations on return, so you must remember to reset
+ * any cached pointers from the @ctx, i.e. after the call to
+ * ntfs_map_runlist_nolock(), you will probably want to do:
+ *	m = ctx->mrec;
+ *	a = ctx->attr;
+ * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
+ * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
+ *
  * Return 0 on success and -errno on error.  There is one special error code
  * which is not an error as such.  This is -ENOENT.  It means that @vcn is out
  * of bounds of the runlist.
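The pointer-refresh rule documented above translates into a little caller-side boilerplate. A minimal sketch, assuming the NTFS-internal types and helpers used in this file (the wrapper function itself is hypothetical; the IS_ERR() obligation added by the WARNING in the next hunk is shown separately after that hunk):

	/*
	 * Hypothetical caller of ntfs_map_runlist_nolock() illustrating the
	 * contract documented above: on return the context still points at
	 * the same attribute, but possibly at a different memory location,
	 * so cached copies of ctx->mrec and ctx->attr must be refreshed.
	 */
	static int example_map_vcn(ntfs_inode *ni, VCN vcn,
			ntfs_attr_search_ctx *ctx)
	{
		MFT_RECORD *m;
		ATTR_RECORD *a;
		int err;

		err = ntfs_map_runlist_nolock(ni, vcn, ctx);
		if (err)
			return err;
		/* Refresh any pointers cached from the context. */
		m = ctx->mrec;
		a = ctx->attr;
		ntfs_debug("Attribute type 0x%x in mft record at %p.",
				le32_to_cpu(a->type), m);
		return 0;
	}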
@@ -46,19 +64,32 @@
  * Note the runlist can be NULL after this function returns if @vcn is zero and
  * the attribute has zero allocated size, i.e. there simply is no runlist.
  *
- * Locking: - The runlist must be locked for writing.
- *	    - This function modifies the runlist.
+ * WARNING: If @ctx is supplied, regardless of whether success or failure is
+ *	    returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
+ *	    is no longer valid, i.e. you need to either call
+ *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
+ *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
+ *	    why the mapping of the old inode failed.
+ *
+ * Locking: - The runlist described by @ni must be locked for writing on entry
+ *	      and is locked on return.  Note the runlist will be modified.
+ *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
+ *	      entry and it will be left unmapped on return.
+ *	    - If @ctx is not NULL, the base mft record must be mapped on entry
+ *	      and it will be left mapped on return.
  */
-int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn)
+int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
 {
 	VCN end_vcn;
+	unsigned long flags;
 	ntfs_inode *base_ni;
 	MFT_RECORD *m;
 	ATTR_RECORD *a;
-	ntfs_attr_search_ctx *ctx;
 	runlist_element *rl;
-	unsigned long flags;
+	struct page *put_this_page = NULL;
 	int err = 0;
+	BOOL ctx_is_temporary, ctx_needs_reset;
+	ntfs_attr_search_ctx old_ctx;

 	ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
 			(unsigned long long)vcn);
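The WARNING above adds one more obligation when a context is supplied: even a successful return must be followed by an IS_ERR() check on @ctx->mrec. A hedged sketch of that pattern, using only helpers named in this file (the wrapper function is hypothetical):

	/*
	 * Hypothetical wrapper showing the error handling the WARNING
	 * requires: the IS_ERR() check on ctx->mrec is needed whether the
	 * call succeeded or not.
	 */
	static int example_map_vcn_checked(ntfs_inode *ni, VCN vcn,
			ntfs_attr_search_ctx *ctx)
	{
		int err = ntfs_map_runlist_nolock(ni, vcn, ctx);

		if (IS_ERR(ctx->mrec)) {
			/*
			 * @ctx is no longer valid and PTR_ERR(ctx->mrec)
			 * holds the error from mapping the old inode;
			 * release the context as the documentation requires.
			 */
			err = PTR_ERR(ctx->mrec);
			ntfs_attr_put_search_ctx(ctx);
		}
		return err;
	}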
@@ -66,20 +97,77 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn)
 		base_ni = ni;
 	else
 		base_ni = ni->ext.base_ntfs_ino;
-	m = map_mft_record(base_ni);
-	if (IS_ERR(m))
-		return PTR_ERR(m);
-	ctx = ntfs_attr_get_search_ctx(base_ni, m);
-	if (unlikely(!ctx)) {
-		err = -ENOMEM;
-		goto err_out;
+	if (!ctx) {
+		ctx_is_temporary = ctx_needs_reset = TRUE;
+		m = map_mft_record(base_ni);
+		if (IS_ERR(m))
+			return PTR_ERR(m);
+		ctx = ntfs_attr_get_search_ctx(base_ni, m);
+		if (unlikely(!ctx)) {
+			err = -ENOMEM;
+			goto err_out;
+		}
+	} else {
+		VCN allocated_size_vcn;
+
+		BUG_ON(IS_ERR(ctx->mrec));
+		a = ctx->attr;
+		BUG_ON(!a->non_resident);
+		ctx_is_temporary = FALSE;
+		end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
+		read_lock_irqsave(&ni->size_lock, flags);
+		allocated_size_vcn = ni->allocated_size >>
+				ni->vol->cluster_size_bits;
+		read_unlock_irqrestore(&ni->size_lock, flags);
+		if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
+			end_vcn = allocated_size_vcn - 1;
+		/*
+		 * If we already have the attribute extent containing @vcn in
+		 * @ctx, no need to look it up again.  We slightly cheat in
+		 * that if vcn exceeds the allocated size, we will refuse to
+		 * map the runlist below, so there is definitely no need to get
+		 * the right attribute extent.
+		 */
+		if (vcn >= allocated_size_vcn || (a->type == ni->type &&
+				a->name_length == ni->name_len &&
+				!memcmp((u8*)a + le16_to_cpu(a->name_offset),
+				ni->name, ni->name_len) &&
+				sle64_to_cpu(a->data.non_resident.lowest_vcn)
+				<= vcn && end_vcn >= vcn))
+			ctx_needs_reset = FALSE;
+		else {
+			/* Save the old search context. */
+			old_ctx = *ctx;
+			/*
+			 * If the currently mapped (extent) inode is not the
+			 * base inode we will unmap it when we reinitialize the
+			 * search context which means we need to get a
+			 * reference to the page containing the mapped mft
+			 * record so we do not accidentally drop changes to the
+			 * mft record when it has not been marked dirty yet.
+			 */
+			if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
+					old_ctx.base_ntfs_ino) {
+				put_this_page = old_ctx.ntfs_ino->page;
+				page_cache_get(put_this_page);
+			}
+			/*
+			 * Reinitialize the search context so we can lookup the
+			 * needed attribute extent.
+			 */
+			ntfs_attr_reinit_search_ctx(ctx);
+			ctx_needs_reset = TRUE;
+		}
 	}
-	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
-			CASE_SENSITIVE, vcn, NULL, 0, ctx);
-	if (unlikely(err)) {
-		if (err == -ENOENT)
-			err = -EIO;
-		goto err_out;
+	if (ctx_needs_reset) {
+		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
+				CASE_SENSITIVE, vcn, NULL, 0, ctx);
+		if (unlikely(err)) {
+			if (err == -ENOENT)
+				err = -EIO;
+			goto err_out;
+		}
+		BUG_ON(!ctx->attr->non_resident);
 	}
 	a = ctx->attr;
 	/*
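The compound if-condition above, which decides whether the supplied context already covers @vcn, is dense. Factored out purely as an illustration (the patch itself open-codes the test; this helper does not exist in the source), the extent-match half of the predicate reads:

	/*
	 * Illustrative only, not part of the patch: TRUE if the extent
	 * record @a cached in the search context matches the attribute of
	 * @ni and its vcn range [lowest_vcn, end_vcn] contains @vcn.
	 */
	static BOOL attr_extent_covers_vcn(const ntfs_inode *ni,
			const ATTR_RECORD *a, const VCN vcn, const VCN end_vcn)
	{
		return a->type == ni->type &&
				a->name_length == ni->name_len &&
				!memcmp((const u8*)a +
				le16_to_cpu(a->name_offset),
				ni->name, ni->name_len) &&
				sle64_to_cpu(a->data.non_resident.lowest_vcn)
				<= vcn && end_vcn >= vcn;
	}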
@@ -89,11 +177,9 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn)
 	 * ntfs_mapping_pairs_decompress() fails.
 	 */
 	end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
-	if (unlikely(!a->data.non_resident.lowest_vcn && end_vcn <= 1)) {
-		read_lock_irqsave(&ni->size_lock, flags);
-		end_vcn = ni->allocated_size >> ni->vol->cluster_size_bits;
-		read_unlock_irqrestore(&ni->size_lock, flags);
-	}
+	if (!a->data.non_resident.lowest_vcn && end_vcn == 1)
+		end_vcn = sle64_to_cpu(a->data.non_resident.allocated_size) >>
+				ni->vol->cluster_size_bits;
 	if (unlikely(vcn >= end_vcn)) {
 		err = -ENOENT;
 		goto err_out;
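As a concrete check of the simplified bounds logic: when the base extent has lowest_vcn == 0 and highest_vcn == 0, end_vcn starts out as 1 and is replaced by the attribute's allocated_size shifted down by cluster_size_bits. With, say, an allocated size of 64KiB on a volume with 4KiB clusters (cluster_size_bits == 12), end_vcn becomes 65536 >> 12 = 16, so vcns 0 through 15 are mappable and any vcn >= 16 fails with -ENOENT.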
@@ -104,9 +190,93 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn)
 	else
 		ni->runlist.rl = rl;
 err_out:
-	if (likely(ctx))
-		ntfs_attr_put_search_ctx(ctx);
-	unmap_mft_record(base_ni);
+	if (ctx_is_temporary) {
+		if (likely(ctx))
+			ntfs_attr_put_search_ctx(ctx);
+		unmap_mft_record(base_ni);
+	} else if (ctx_needs_reset) {
+		/*
+		 * If there is no attribute list, restoring the search context
+		 * is accomplished simply by copying the saved context back
+		 * over the caller supplied context.  If there is an attribute
+		 * list, things are more complicated as we need to deal with
+		 * mapping of mft records and resulting potential changes in
+		 * pointers.
+		 */
+		if (NInoAttrList(base_ni)) {
+			/*
+			 * If the currently mapped (extent) inode is not the
+			 * one we had before, we need to unmap it and map the
+			 * old one.
+			 */
+			if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
+				/*
+				 * If the currently mapped inode is not the
+				 * base inode, unmap it.
+				 */
+				if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
+						ctx->base_ntfs_ino) {
+					unmap_extent_mft_record(ctx->ntfs_ino);
+					ctx->mrec = ctx->base_mrec;
+					BUG_ON(!ctx->mrec);
+				}
+				/*
+				 * If the old mapped inode is not the base
+				 * inode, map it.
+				 */
+				if (old_ctx.base_ntfs_ino &&
+						old_ctx.ntfs_ino !=
+						old_ctx.base_ntfs_ino) {
+retry_map:
+					ctx->mrec = map_mft_record(
+							old_ctx.ntfs_ino);
+					/*
+					 * Something bad has happened.  If out
+					 * of memory retry till it succeeds.
+					 * Any other errors are fatal and we
+					 * return the error code in ctx->mrec.
+					 * Let the caller deal with it...  We
+					 * just need to fudge things so the
+					 * caller can reinit and/or put the
+					 * search context safely.
+					 */
+					if (IS_ERR(ctx->mrec)) {
+						if (PTR_ERR(ctx->mrec) ==
+								-ENOMEM) {
+							schedule();
+							goto retry_map;
+						} else
+							old_ctx.ntfs_ino =
+								old_ctx.
+								base_ntfs_ino;
+					}
+				}
+			}
+			/* Update the changed pointers in the saved context. */
+			if (ctx->mrec != old_ctx.mrec) {
+				if (!IS_ERR(ctx->mrec))
+					old_ctx.attr = (ATTR_RECORD*)(
+							(u8*)ctx->mrec +
+							((u8*)old_ctx.attr -
+							(u8*)old_ctx.mrec));
+				old_ctx.mrec = ctx->mrec;
+			}
+		}
+		/* Restore the search context to the saved one. */
+		*ctx = old_ctx;
+		/*
+		 * We drop the reference on the page we took earlier.  In the
+		 * case that IS_ERR(ctx->mrec) is true this means we might lose
+		 * some changes to the mft record that had been made between
+		 * the last time it was marked dirty/written out and now.  This
+		 * at this stage is not a problem as the mapping error is fatal
+		 * enough that the mft record cannot be written out anyway and
+		 * the caller is very likely to shutdown the whole inode
+		 * immediately and mark the volume dirty for chkdsk to pick up
+		 * the pieces anyway.
+		 */
+		if (put_this_page)
+			page_cache_release(put_this_page);
+	}
 	return err;
 }

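The retry_map logic in this hunk is a standard kernel idiom: -ENOMEM from map_mft_record() is treated as transient, every other error as fatal. Stripped of the surrounding context restoration, the pattern is just the fragment below (a sketch; ni stands in for old_ctx.ntfs_ino, all other identifiers are the ones used in the patch):

	MFT_RECORD *m;

	/* Treat -ENOMEM as transient: yield the cpu and retry the mapping. */
	while (IS_ERR(m = map_mft_record(ni)) && PTR_ERR(m) == -ENOMEM)
		schedule();
	/* On loop exit @m is either valid or a fatal error pointer, which is
	 * left for the caller to deal with. */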
@@ -122,8 +292,8 @@ err_out:
  * of bounds of the runlist.
  *
  * Locking: - The runlist must be unlocked on entry and is unlocked on return.
- *	    - This function takes the runlist lock for writing and modifies the
- *	      runlist.
+ *	    - This function takes the runlist lock for writing and may modify
+ *	      the runlist.
  */
 int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
 {
@@ -133,7 +303,7 @@ int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
 	/* Make sure someone else didn't do the work while we were sleeping. */
 	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
 			LCN_RL_NOT_MAPPED))
-		err = ntfs_map_runlist_nolock(ni, vcn);
+		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
 	up_write(&ni->runlist.lock);
 	return err;
 }
@@ -212,7 +382,7 @@ retry_remap:
 			goto retry_remap;
 		}
 	}
-	err = ntfs_map_runlist_nolock(ni, vcn);
+	err = ntfs_map_runlist_nolock(ni, vcn, NULL);
 	if (!write_locked) {
 		up_write(&ni->runlist.lock);
 		down_read(&ni->runlist.lock);
@@ -325,7 +495,7 @@ retry_remap:
 			goto retry_remap;
 		}
 	}
-	err = ntfs_map_runlist_nolock(ni, vcn);
+	err = ntfs_map_runlist_nolock(ni, vcn, NULL);
 	if (!write_locked) {
 		up_write(&ni->runlist.lock);
 		down_read(&ni->runlist.lock);