Diffstat (limited to 'fs')
 -rw-r--r--  fs/ntfs/ChangeLog |   2
 -rw-r--r--  fs/ntfs/aops.c    | 122
 -rw-r--r--  fs/ntfs/inode.c   |   9
 -rw-r--r--  fs/ntfs/malloc.h  |   2
 -rw-r--r--  fs/ntfs/runlist.c | 132
 5 files changed, 158 insertions(+), 109 deletions(-)
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 49eafbdb15c1..c7e9237379c2 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -92,6 +92,8 @@ ToDo/Notes:
 	an octal number to conform to how chmod(1) works, too.  Thanks to
 	Giuseppe Bilotta and Horst von Brand for pointing out the errors of
 	my ways.
+	- Fix various bugs in the runlist merging code.  (Based on libntfs
+	  changes by Richard Russon.)
 
 2.1.23 - Implement extension of resident files and make writing safe as well as
 	many bug fixes, cleanups, and enhancements...
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index b6cc8cf24626..5e80c07c6a4d 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -59,39 +59,49 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	unsigned long flags;
 	struct buffer_head *first, *tmp;
 	struct page *page;
+	struct inode *vi;
 	ntfs_inode *ni;
 	int page_uptodate = 1;
 
 	page = bh->b_page;
-	ni = NTFS_I(page->mapping->host);
+	vi = page->mapping->host;
+	ni = NTFS_I(vi);
 
 	if (likely(uptodate)) {
-		s64 file_ofs, initialized_size;
+		loff_t i_size;
+		s64 file_ofs, init_size;
 
 		set_buffer_uptodate(bh);
 
 		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
 				bh_offset(bh);
 		read_lock_irqsave(&ni->size_lock, flags);
-		initialized_size = ni->initialized_size;
+		init_size = ni->initialized_size;
+		i_size = i_size_read(vi);
 		read_unlock_irqrestore(&ni->size_lock, flags);
+		if (unlikely(init_size > i_size)) {
+			/* Race with shrinking truncate. */
+			init_size = i_size;
+		}
 		/* Check for the current buffer head overflowing. */
-		if (file_ofs + bh->b_size > initialized_size) {
-			char *addr;
-			int ofs = 0;
+		if (unlikely(file_ofs + bh->b_size > init_size)) {
+			u8 *kaddr;
+			int ofs;
 
-			if (file_ofs < initialized_size)
-				ofs = initialized_size - file_ofs;
-			addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
-			memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
+			ofs = 0;
+			if (file_ofs < init_size)
+				ofs = init_size - file_ofs;
+			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+			memset(kaddr + bh_offset(bh) + ofs, 0,
+					bh->b_size - ofs);
+			kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
 			flush_dcache_page(page);
-			kunmap_atomic(addr, KM_BIO_SRC_IRQ);
 		}
 	} else {
 		clear_buffer_uptodate(bh);
 		SetPageError(page);
-		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
-				(unsigned long long)bh->b_blocknr);
+		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
+				"0x%llx.", (unsigned long long)bh->b_blocknr);
 	}
 	first = page_buffers(page);
 	local_irq_save(flags);
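
The pattern this hunk introduces recurs throughout the patch: snapshot
ni->initialized_size and i_size in one locked section, then clamp the former
to the latter so a racing shrinking truncate can never cause zeroing or
reading past the new end of file. A minimal sketch of that pattern, reusing
the identifiers from the hunk (the surrounding buffer-head handling is elided,
so this is illustrative rather than a drop-in):

	read_lock_irqsave(&ni->size_lock, flags);
	init_size = ni->initialized_size;	/* Sample both sizes...        */
	i_size = i_size_read(vi);		/* ...in one critical section. */
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(init_size > i_size)) {
		/* Race with shrinking truncate: trust i_size. */
		init_size = i_size;
	}
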
@@ -124,7 +134,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		if (likely(page_uptodate && !PageError(page)))
 			SetPageUptodate(page);
 	} else {
-		char *addr;
+		u8 *kaddr;
 		unsigned int i, recs;
 		u32 rec_size;
 
@@ -132,12 +142,12 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		recs = PAGE_CACHE_SIZE / rec_size;
 		/* Should have been verified before we got here... */
 		BUG_ON(!recs);
-		addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
 		for (i = 0; i < recs; i++)
-			post_read_mst_fixup((NTFS_RECORD*)(addr +
-					i * rec_size), rec_size);
+			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
+					i * rec_size), rec_size);
+		kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
 		flush_dcache_page(page);
-		kunmap_atomic(addr, KM_BIO_SRC_IRQ);
 		if (likely(page_uptodate && !PageError(page)))
 			SetPageUptodate(page);
 	}
@@ -168,8 +178,11 @@ still_busy:
  */
 static int ntfs_read_block(struct page *page)
 {
+	loff_t i_size;
 	VCN vcn;
 	LCN lcn;
+	s64 init_size;
+	struct inode *vi;
 	ntfs_inode *ni;
 	ntfs_volume *vol;
 	runlist_element *rl;
@@ -180,7 +193,8 @@ static int ntfs_read_block(struct page *page)
 	int i, nr;
 	unsigned char blocksize_bits;
 
-	ni = NTFS_I(page->mapping->host);
+	vi = page->mapping->host;
+	ni = NTFS_I(vi);
 	vol = ni->vol;
 
 	/* $MFT/$DATA must have its complete runlist in memory at all times. */
@@ -199,11 +213,28 @@ static int ntfs_read_block(struct page *page)
 	bh = head = page_buffers(page);
 	BUG_ON(!bh);
 
+	/*
+	 * We may be racing with truncate.  To avoid some of the problems we
+	 * now take a snapshot of the various sizes and use those for the whole
+	 * of the function.  In case of an extending truncate it just means we
+	 * may leave some buffers unmapped which are now allocated.  This is
+	 * not a problem since these buffers will just get mapped when a write
+	 * occurs.  In case of a shrinking truncate, we will detect this later
+	 * on due to the runlist being incomplete and if the page is being
+	 * fully truncated, truncate will throw it away as soon as we unlock
+	 * it so no need to worry what we do with it.
+	 */
 	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
 	read_lock_irqsave(&ni->size_lock, flags);
 	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
-	zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
+	init_size = ni->initialized_size;
+	i_size = i_size_read(vi);
 	read_unlock_irqrestore(&ni->size_lock, flags);
+	if (unlikely(init_size > i_size)) {
+		/* Race with shrinking truncate. */
+		init_size = i_size;
+	}
+	zblock = (init_size + blocksize - 1) >> blocksize_bits;
 
 	/* Loop through all the buffers in the page. */
 	rl = NULL;
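
For concreteness, a worked example of the block bounds computed above, with
invented values: blocksize = 512 (blocksize_bits = 9), PAGE_CACHE_SHIFT = 12,
page->index = 2, allocated_size = 10240 and (clamped) init_size = 9000:

	iblock = 2 << (12 - 9);		/* = 16, first block in this page */
	lblock = (10240 + 511) >> 9;	/* = 20, end of allocated blocks  */
	zblock = (9000 + 511) >> 9;	/* = 18, end of initialized data  */

Blocks 16 and 17 are mapped and read from disk (block 17 straddles the
initialized-size boundary at byte 9000, so ntfs_end_buffer_async_read() zeroes
its tail), while blocks 18 and 19 lie beyond zblock and are zeroed outright.
Clamping init_size first keeps zblock from reaching past the file size.
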
@@ -366,6 +397,8 @@ handle_zblock:
  */
 static int ntfs_readpage(struct file *file, struct page *page)
 {
+	loff_t i_size;
+	struct inode *vi;
 	ntfs_inode *ni, *base_ni;
 	u8 *kaddr;
 	ntfs_attr_search_ctx *ctx;
@@ -384,14 +417,17 @@ retry_readpage:
 		unlock_page(page);
 		return 0;
 	}
-	ni = NTFS_I(page->mapping->host);
+	vi = page->mapping->host;
+	ni = NTFS_I(vi);
 	/*
 	 * Only $DATA attributes can be encrypted and only unnamed $DATA
 	 * attributes can be compressed.  Index root can have the flags set but
 	 * this means to create compressed/encrypted files, not that the
-	 * attribute is compressed/encrypted.
+	 * attribute is compressed/encrypted.  Note we need to check for
+	 * AT_INDEX_ALLOCATION since this is the type of both directory and
+	 * index inodes.
 	 */
-	if (ni->type != AT_INDEX_ROOT) {
+	if (ni->type != AT_INDEX_ALLOCATION) {
 		/* If attribute is encrypted, deny access, just like NT4. */
 		if (NInoEncrypted(ni)) {
 			BUG_ON(ni->type != AT_DATA);
@@ -456,7 +492,12 @@ retry_readpage:
 	read_lock_irqsave(&ni->size_lock, flags);
 	if (unlikely(attr_len > ni->initialized_size))
 		attr_len = ni->initialized_size;
+	i_size = i_size_read(vi);
 	read_unlock_irqrestore(&ni->size_lock, flags);
+	if (unlikely(attr_len > i_size)) {
+		/* Race with shrinking truncate. */
+		attr_len = i_size;
+	}
 	kaddr = kmap_atomic(page, KM_USER0);
 	/* Copy the data to the page. */
 	memcpy(kaddr, (u8*)ctx->attr +
@@ -1341,9 +1382,11 @@ retry_writepage:
 	 * Only $DATA attributes can be encrypted and only unnamed $DATA
 	 * attributes can be compressed.  Index root can have the flags set but
 	 * this means to create compressed/encrypted files, not that the
-	 * attribute is compressed/encrypted.
+	 * attribute is compressed/encrypted.  Note we need to check for
+	 * AT_INDEX_ALLOCATION since this is the type of both directory and
+	 * index inodes.
 	 */
-	if (ni->type != AT_INDEX_ROOT) {
+	if (ni->type != AT_INDEX_ALLOCATION) {
 		/* If file is encrypted, deny access, just like NT4. */
 		if (NInoEncrypted(ni)) {
 			unlock_page(page);
@@ -1379,8 +1422,8 @@ retry_writepage:
 		unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
 		kaddr = kmap_atomic(page, KM_USER0);
 		memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
-		flush_dcache_page(page);
 		kunmap_atomic(kaddr, KM_USER0);
+		flush_dcache_page(page);
 	}
 	/* Handle mst protected attributes. */
 	if (NInoMstProtected(ni))
@@ -1443,34 +1486,33 @@ retry_writepage:
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
 	unlock_page(page);
-	/*
-	 * Here, we do not need to zero the out of bounds area everytime
-	 * because the below memcpy() already takes care of the
-	 * mmap-at-end-of-file requirements.  If the file is converted to a
-	 * non-resident one, then the code path use is switched to the
-	 * non-resident one where the zeroing happens on each ntfs_writepage()
-	 * invocation.
-	 */
 	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
 	i_size = i_size_read(vi);
 	if (unlikely(attr_len > i_size)) {
+		/* Race with shrinking truncate or a failed truncate. */
 		attr_len = i_size;
-		ctx->attr->data.resident.value_length = cpu_to_le32(attr_len);
+		/*
+		 * If the truncate failed, fix it up now.  If a concurrent
+		 * truncate, we do its job, so it does not have to do anything.
+		 */
+		err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
+				attr_len);
+		/* Shrinking cannot fail. */
+		BUG_ON(err);
 	}
 	kaddr = kmap_atomic(page, KM_USER0);
 	/* Copy the data from the page to the mft record. */
 	memcpy((u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			kaddr, attr_len);
-	flush_dcache_mft_record_page(ctx->ntfs_ino);
 	/* Zero out of bounds area in the page cache page. */
 	memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
-	flush_dcache_page(page);
 	kunmap_atomic(kaddr, KM_USER0);
-
+	flush_dcache_mft_record_page(ctx->ntfs_ino);
+	flush_dcache_page(page);
+	/* We are done with the page. */
 	end_page_writeback(page);
-
-	/* Mark the mft record dirty, so it gets written back. */
+	/* Finally, mark the mft record dirty, so it gets written back. */
 	mark_mft_record_dirty(ctx->ntfs_ino);
 	ntfs_attr_put_search_ctx(ctx);
 	unmap_mft_record(base_ni);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index dc4bbe3acf5c..7ec045131808 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1166,6 +1166,8 @@ err_out:
  *
  * Return 0 on success and -errno on error.  In the error case, the inode will
  * have had make_bad_inode() executed on it.
+ *
+ * Note this cannot be called for AT_INDEX_ALLOCATION.
  */
 static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
 {
@@ -1242,8 +1244,8 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
 		}
 	}
 	/*
-	 * The encryption flag set in an index root just means to
-	 * compress all files.
+	 * The compressed/sparse flag set in an index root just means
+	 * to compress all files.
 	 */
 	if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) {
 		ntfs_error(vi->i_sb, "Found mst protected attribute "
@@ -1319,8 +1321,7 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
 					"the mapping pairs array.");
 			goto unm_err_out;
 		}
-		if ((NInoCompressed(ni) || NInoSparse(ni)) &&
-				ni->type != AT_INDEX_ROOT) {
+		if (NInoCompressed(ni) || NInoSparse(ni)) {
 			if (a->data.non_resident.compression_unit != 4) {
 				ntfs_error(vi->i_sb, "Found nonstandard "
 						"compression unit (%u instead "
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index 3288bcc2c4aa..006946efca8c 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -1,7 +1,7 @@
 /*
  * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2001-2005 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index f5b2ac929081..e2665d011d72 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -2,7 +2,7 @@
  * runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project.
  *
  * Copyright (c) 2001-2005 Anton Altaparmakov
- * Copyright (c) 2002 Richard Russon
+ * Copyright (c) 2002-2005 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -214,8 +214,8 @@ static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src)
 static inline runlist_element *ntfs_rl_append(runlist_element *dst,
 		int dsize, runlist_element *src, int ssize, int loc)
 {
-	BOOL right;
-	int magic;
+	BOOL right;	/* Right end of @src needs merging. */
+	int marker;	/* End of the inserted runs. */
 
 	BUG_ON(!dst);
 	BUG_ON(!src);
@@ -236,18 +236,19 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst,
 	if (right)
 		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
 
-	magic = loc + ssize;
+	/* First run after the @src runs that have been inserted. */
+	marker = loc + ssize + 1;
 
 	/* Move the tail of @dst out of the way, then copy in @src. */
-	ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right);
+	ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right));
 	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
 
 	/* Adjust the size of the preceding hole. */
 	dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
 
 	/* We may have changed the length of the file, so fix the end marker */
-	if (dst[magic + 1].lcn == LCN_ENOENT)
-		dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length;
+	if (dst[marker].lcn == LCN_ENOENT)
+		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
 
 	return dst;
 }
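
The marker arithmetic is easier to follow with concrete indices; the values
below are invented for illustration, with dsize = 4, loc = 1, ssize = 2 and
right = 0 (no right merge):

	marker = loc + ssize + 1;	/* = 4                             */
	ntfs_rl_mm(dst, 4, 2, 4 - 2);	/* old tail dst[2..3] -> dst[4..5] */
	ntfs_rl_mc(dst, 2, src, 0, 2);	/* src[0..1]          -> dst[2..3] */

dst is now { dst[0], dst[1], src[0], src[1], old dst[2], old dst[3] } and
dst[marker] is the first run after the inserted ones, so the LCN_ENOENT
end-marker fixup reads its predecessor dst[marker - 1], the last inserted run.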
@@ -279,18 +280,17 @@ static inline runlist_element *ntfs_rl_append(runlist_element *dst,
 static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
 		int dsize, runlist_element *src, int ssize, int loc)
 {
-	BOOL left = FALSE;
-	BOOL disc = FALSE;	/* Discontinuity */
-	BOOL hole = FALSE;	/* Following a hole */
-	int magic;
+	BOOL left = FALSE;	/* Left end of @src needs merging. */
+	BOOL disc = FALSE;	/* Discontinuity between @dst and @src. */
+	int marker;		/* End of the inserted runs. */
 
 	BUG_ON(!dst);
 	BUG_ON(!src);
 
-	/* disc => Discontinuity between the end of @dst and the start of @src.
-	 * This means we might need to insert a hole.
-	 * hole => @dst ends with a hole or an unmapped region which we can
-	 * extend to match the discontinuity. */
+	/*
+	 * disc => Discontinuity between the end of @dst and the start of @src.
+	 * This means we might need to insert a "not mapped" run.
+	 */
 	if (loc == 0)
 		disc = (src[0].vcn > 0);
 	else {
@@ -303,58 +303,49 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
 		merged_length += src->length;
 
 		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
-		if (disc)
-			hole = (dst[loc - 1].lcn == LCN_HOLE);
 	}
-
-	/* Space required: @dst size + @src size, less one if we merged, plus
-	 * one if there was a discontinuity, less one for a trailing hole. */
-	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole);
+	/*
+	 * Space required: @dst size + @src size, less one if we merged, plus
+	 * one if there was a discontinuity.
+	 */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc);
 	if (IS_ERR(dst))
 		return dst;
 	/*
 	 * We are guaranteed to succeed from here so can start modifying the
 	 * original runlist.
 	 */
-
 	if (left)
 		__ntfs_rl_merge(dst + loc - 1, src);
-
-	magic = loc + ssize - left + disc - hole;
+	/*
+	 * First run after the @src runs that have been inserted.
+	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
+	 * runs in @src.  However, if @left, then the first run in @src has
+	 * been merged with one in @dst.  And if @disc, then @dst and @src do
+	 * not meet and we need an extra run to fill the gap.
+	 */
+	marker = loc + ssize - left + disc;
 
 	/* Move the tail of @dst out of the way, then copy in @src. */
-	ntfs_rl_mm(dst, magic, loc, dsize - loc);
-	ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left);
+	ntfs_rl_mm(dst, marker, loc, dsize - loc);
+	ntfs_rl_mc(dst, loc + disc, src, left, ssize - left);
 
-	/* Adjust the VCN of the last run ... */
-	if (dst[magic].lcn <= LCN_HOLE)
-		dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+	/* Adjust the VCN of the first run after the insertion... */
+	dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
 	/* ... and the length. */
-	if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED)
-		dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn;
+	if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED)
+		dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;
 
-	/* Writing beyond the end of the file and there's a discontinuity. */
+	/* Writing beyond the end of the file and there is a discontinuity. */
 	if (disc) {
-		if (hole)
-			dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn;
-		else {
-			if (loc > 0) {
-				dst[loc].vcn = dst[loc - 1].vcn +
-						dst[loc - 1].length;
-				dst[loc].length = dst[loc + 1].vcn -
-						dst[loc].vcn;
-			} else {
-				dst[loc].vcn = 0;
-				dst[loc].length = dst[loc + 1].vcn;
-			}
-			dst[loc].lcn = LCN_RL_NOT_MAPPED;
+		if (loc > 0) {
+			dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;
+			dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
+		} else {
+			dst[loc].vcn = 0;
+			dst[loc].length = dst[loc + 1].vcn;
 		}
-
-		magic += hole;
-
-		if (dst[magic].lcn == LCN_ENOENT)
-			dst[magic].vcn = dst[magic - 1].vcn +
-					dst[magic - 1].length;
+		dst[loc].lcn = LCN_RL_NOT_MAPPED;
 	}
 	return dst;
 }
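
To see the new discontinuity handling in action, a hand-worked insert with
invented runs: dst[0] = { vcn 0, len 4, lcn 50 } followed by its terminator at
dst[1], and src[0] = { vcn 6, len 2, lcn 80 } inserted at loc = 1 with
left = 0. Since src starts at vcn 6 but dst[0] ends at vcn 4, disc = 1:

	marker = loc + ssize - left + disc;	/* = 1 + 1 - 0 + 1 = 3        */
	ntfs_rl_mm(dst, 3, 1, dsize - 1);	/* terminator moves to dst[3] */
	ntfs_rl_mc(dst, 2, src, 0, 1);		/* src[0] lands in dst[2]     */
	dst[3].vcn = dst[2].vcn + dst[2].length;	/* terminator at vcn 8 */
	/* disc && loc > 0: dst[1] becomes the gap run. */
	dst[1].vcn = 4;				/* dst[0].vcn + dst[0].length */
	dst[1].length = 2;			/* dst[2].vcn - dst[1].vcn    */
	dst[1].lcn = LCN_RL_NOT_MAPPED;

Where the old code tried to extend a preceding LCN_HOLE run to cover such a
gap, the rewritten version always inserts an explicit LCN_RL_NOT_MAPPED run,
which is what removing the "hole" special case amounts to.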
@@ -385,9 +376,10 @@ static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
 static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
 		int dsize, runlist_element *src, int ssize, int loc)
 {
-	BOOL left = FALSE;
-	BOOL right;
-	int magic;
+	BOOL left = FALSE;	/* Left end of @src needs merging. */
+	BOOL right;		/* Right end of @src needs merging. */
+	int tail;		/* Start of tail of @dst. */
+	int marker;		/* End of the inserted runs. */
 
 	BUG_ON(!dst);
 	BUG_ON(!src);
@@ -396,9 +388,10 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
 	right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
 	if (loc > 0)
 		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
-
-	/* Allocate some space.  We'll need less if the left, right, or both
-	 * ends were merged. */
+	/*
+	 * Allocate some space.  We will need less if the left, right, or both
+	 * ends were merged.
+	 */
 	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right);
 	if (IS_ERR(dst))
 		return dst;
@@ -410,17 +403,28 @@ static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
 		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
 	if (left)
 		__ntfs_rl_merge(dst + loc - 1, src);
-
-	/* FIXME: What does this mean? (AIA) */
-	magic = loc + ssize - left;
+	/*
+	 * First run of @dst that needs to be moved out of the way to make
+	 * space for the runs to be copied from @src, i.e. the first run of the
+	 * tail of @dst.
+	 */
+	tail = loc + right + 1;
+	/*
+	 * First run after the @src runs that have been inserted, i.e. where
+	 * the tail of @dst needs to be moved to.
+	 * Nominally, @marker equals @loc + @ssize, i.e. location + number of
+	 * runs in @src.  However, if @left, then the first run in @src has
+	 * been merged with one in @dst.
+	 */
+	marker = loc + ssize - left;
 
 	/* Move the tail of @dst out of the way, then copy in @src. */
-	ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1);
+	ntfs_rl_mm(dst, marker, tail, dsize - tail);
 	ntfs_rl_mc(dst, loc, src, left, ssize - left);
 
-	/* We may have changed the length of the file, so fix the end marker */
-	if (dst[magic].lcn == LCN_ENOENT)
-		dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+	/* We may have changed the length of the file, so fix the end marker. */
+	if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT)
+		dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
 	return dst;
 }
 
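
The new dsize - tail > 0 guard covers the corner case where nothing of the old
tail survives the merge; the reading below is inferred from the arithmetic,
with invented sizes: dsize = 3, loc = 1, ssize = 1, left = 0 and right = 1,
i.e. dst[2], the last element of @dst, merges into the end of @src:

	tail = loc + right + 1;		/* = 3 == dsize: no tail remains   */
	marker = loc + ssize - left;	/* = 2                             */
	ntfs_rl_mm(dst, 2, 3, 3 - 3);	/* zero-length move, nothing to do */
	ntfs_rl_mc(dst, 1, src, 0, 1);	/* merged src[0] lands in dst[1]   */

Here dst[marker] is never rewritten and holds stale data (its old contents
were merged into @src), so the previous unconditional end-marker fixup could
act on garbage; with dsize - tail == 0 the new code skips it entirely.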