author    Marcin Slusarz <marcin.slusarz@gmail.com>  2008-02-08 07:20:30 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-08 12:22:34 -0500
commit    6c79e987d629cb0f8f7e2983725f4434a2dec66b (patch)
tree      ea2e17a12a0bdc5d68d2fe49941bfdf249e0f647 /fs/udf/balloc.c
parent    3a71fc5de56338076fe99f24f50bccfebabefe18 (diff)
udf: remove some ugly macros
remove macros:
- UDF_SB_PARTMAPS
- UDF_SB_PARTTYPE
- UDF_SB_PARTROOT
- UDF_SB_PARTLEN
- UDF_SB_PARTVSN
- UDF_SB_PARTNUM
- UDF_SB_TYPESPAR
- UDF_SB_TYPEVIRT
- UDF_SB_PARTFUNC
- UDF_SB_PARTFLAGS
- UDF_SB_VOLIDENT
- UDF_SB_NUMPARTS
- UDF_SB_PARTITION
- UDF_SB_SESSION
- UDF_SB_ANCHOR
- UDF_SB_LASTBLOCK
- UDF_SB_LVIDBH
- UDF_SB_LVID
- UDF_SB_UMASK
- UDF_SB_GID
- UDF_SB_UID
- UDF_SB_RECORDTIME
- UDF_SB_SERIALNUM
- UDF_SB_UDFREV
- UDF_SB_FLAGS
- UDF_SB_VAT
- UDF_UPDATE_UDFREV
- UDF_SB_FREE
and open code them

convert the UDF_SB_LVIDIU macro to a udf_sb_lvidiu function

rename some struct udf_sb_info fields:
- s_volident to s_volume_ident
- s_lastblock to s_last_block
- s_lvidbh to s_lvid_bh
- s_recordtime to s_record_time
- s_serialnum to s_serial_number
- s_vat to s_vat_inode

Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Cc: Ben Fennema <bfennema@falcon.csc.calpoly.edu>
Cc: Jan Kara <jack@suse.cz>
Acked-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
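For orientation, a rough sketch of the kind of inline helper that replaces the UDF_SB_LVIDIU macro is shown below. The real definition is added to the UDF superblock header elsewhere in this patch, not in balloc.c, so the exact body and the offset arithmetic here are an assumption based on the on-disk logical volume integrity descriptor layout (a free-space table and a size table, each with one 32-bit entry per partition, preceding the implementation-use area):

/* Hypothetical sketch, not the patch's literal definition: return the
 * implementation-use area of the cached logical volume integrity
 * descriptor, which follows the free-space and size tables. */
static inline struct logicalVolIntegrityDescImpUse *
udf_sb_lvidiu(struct udf_sb_info *sbi)
{
        struct logicalVolIntegrityDesc *lvid =
                (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        __u32 npart = le32_to_cpu(lvid->numOfPartitions);

        return (struct logicalVolIntegrityDescImpUse *)
                &lvid->impUse[npart * 2 * sizeof(uint32_t)];
}

The open-coded call sites in the diff below follow the same pattern: cast sbi->s_lvid_bh->b_data to struct logicalVolIntegrityDesc * once, then index freeSpaceTable[] directly instead of going through UDF_SB_LVID().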
Diffstat (limited to 'fs/udf/balloc.c')
-rw-r--r--  fs/udf/balloc.c  136
1 files changed, 74 insertions, 62 deletions
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index ab26176f6b91..8c0c27912278 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -88,7 +88,7 @@ static int read_block_bitmap(struct super_block *sb,
 	kernel_lb_addr loc;
 
 	loc.logicalBlockNum = bitmap->s_extPosition;
-	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
+	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
 
 	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
 	if (!bh) {
@@ -155,10 +155,10 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
+	    (bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
 		udf_debug("%d < %d || %d + %d > %d\n",
 			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
-			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+			  sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
 		goto error_return;
 	}
 
@@ -188,9 +188,10 @@ do_more:
 	} else {
 		if (inode)
 			DQUOT_FREE_BLOCK(inode, 1);
-		if (UDF_SB_LVIDBH(sb)) {
-			UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
-				cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
+		if (sbi->s_lvid_bh) {
+			struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+			lvid->freeSpaceTable[sbi->s_partition] =
+				cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + 1);
 		}
 	}
 }
@@ -202,8 +203,8 @@ do_more:
 	}
 error_return:
 	sb->s_dirt = 1;
-	if (UDF_SB_LVIDBH(sb))
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh)
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	mutex_unlock(&sbi->s_alloc_mutex);
 	return;
 }
@@ -219,16 +220,18 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
 	int bit, block, block_group, group_start;
 	int nr_groups, bitmap_nr;
 	struct buffer_head *bh;
+	__u32 part_len;
 
 	mutex_lock(&sbi->s_alloc_mutex);
-	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
+	part_len = sbi->s_partmaps[partition].s_partition_len;
+	if (first_block < 0 || first_block >= part_len)
 		goto out;
 
-	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
-		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;
+	if (first_block + block_count > part_len)
+		block_count = part_len - first_block;
 
 repeat:
-	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
+	nr_groups = (sbi->s_partmaps[partition].s_partition_len +
 		     (sizeof(struct spaceBitmapDesc) << 3) +
 		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
 	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
@@ -261,10 +264,11 @@ repeat:
 	if (block_count > 0)
 		goto repeat;
 out:
-	if (UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 	sb->s_dirt = 1;
 	mutex_unlock(&sbi->s_alloc_mutex);
@@ -287,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block *sb,
 	mutex_lock(&sbi->s_alloc_mutex);
 
 repeat:
-	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
+	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
 		goal = 0;
 
 	nr_groups = bitmap->s_nr_groups;
@@ -389,10 +393,11 @@ got_block:
 
 	mark_buffer_dirty(bh);
 
-	if (UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 	sb->s_dirt = 1;
 	mutex_unlock(&sbi->s_alloc_mutex);
@@ -421,10 +426,10 @@ static void udf_table_free_blocks(struct super_block *sb,
 
 	mutex_lock(&sbi->s_alloc_mutex);
 	if (bloc.logicalBlockNum < 0 ||
-	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
+	    (bloc.logicalBlockNum + count) > sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
 		udf_debug("%d < %d || %d + %d > %d\n",
 			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
-			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
+			  sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len);
 		goto error_return;
 	}
 
@@ -432,10 +437,11 @@ static void udf_table_free_blocks(struct super_block *sb,
 	   but.. oh well */
 	if (inode)
 		DQUOT_FREE_BLOCK(inode, count);
-	if (UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[sbi->s_partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + count);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 
 	start = bloc.logicalBlockNum + offset;
@@ -559,7 +565,7 @@ static void udf_table_free_blocks(struct super_block *sb,
 			}
 			epos.offset = sizeof(struct allocExtDesc);
 		}
-		if (UDF_SB_UDFREV(sb) >= 0x0200)
+		if (sbi->s_udfrev >= 0x0200)
 			udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
 				    epos.block.logicalBlockNum, sizeof(tag));
 		else
@@ -627,7 +633,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
 	struct extent_position epos;
 	int8_t etype = -1;
 
-	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
+	if (first_block < 0 || first_block >= sbi->s_partmaps[partition].s_partition_len)
 		return 0;
 
 	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
@@ -670,10 +676,11 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
 
 	brelse(epos.bh);
 
-	if (alloc_count && UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (alloc_count && sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 		sb->s_dirt = 1;
 	}
 	mutex_unlock(&sbi->s_alloc_mutex);
@@ -703,7 +710,7 @@ static int udf_table_new_block(struct super_block *sb,
 		return newblock;
 
 	mutex_lock(&sbi->s_alloc_mutex);
-	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
+	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
 		goal = 0;
 
 	/* We search for the closest matching block to goal. If we find a exact hit,
@@ -771,10 +778,11 @@ static int udf_table_new_block(struct super_block *sb,
 	udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
 	brelse(goal_epos.bh);
 
-	if (UDF_SB_LVIDBH(sb)) {
-		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
-			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
-		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
+	if (sbi->s_lvid_bh) {
+		struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
+		lvid->freeSpaceTable[partition] =
+			cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
+		mark_buffer_dirty(sbi->s_lvid_bh);
 	}
 
 	sb->s_dirt = 1;
@@ -789,22 +797,23 @@ inline void udf_free_blocks(struct super_block *sb,
 		     uint32_t count)
 {
 	uint16_t partition = bloc.partitionReferenceNum;
+	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
 
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-					      UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+					      map->s_uspace.s_bitmap,
 					      bloc, offset, count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-					     UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+					     map->s_uspace.s_table,
 					     bloc, offset, count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_free_blocks(sb, inode,
-					      UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+					      map->s_fspace.s_bitmap,
 					      bloc, offset, count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_free_blocks(sb, inode,
-					     UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+					     map->s_fspace.s_table,
 					     bloc, offset, count);
 	} else {
 		return;
@@ -816,21 +825,23 @@ inline int udf_prealloc_blocks(struct super_block *sb,
 			       uint16_t partition, uint32_t first_block,
 			       uint32_t block_count)
 {
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
+	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
+
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-						  UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+						  map->s_uspace.s_bitmap,
 						  partition, first_block, block_count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-						 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+						 map->s_uspace.s_table,
 						 partition, first_block, block_count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_prealloc_blocks(sb, inode,
-						  UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+						  map->s_fspace.s_bitmap,
 						  partition, first_block, block_count);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_prealloc_blocks(sb, inode,
-						 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+						 map->s_fspace.s_table,
 						 partition, first_block, block_count);
 	} else {
 		return 0;
@@ -842,23 +853,24 @@ inline int udf_new_block(struct super_block *sb,
 			 uint16_t partition, uint32_t goal, int *err)
 {
 	int ret;
+	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
 
-	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
+	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
 		ret = udf_bitmap_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
+					   map->s_uspace.s_bitmap,
 					   partition, goal, err);
 		return ret;
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
 		return udf_table_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
+					   map->s_uspace.s_table,
 					   partition, goal, err);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
 		return udf_bitmap_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
+					   map->s_fspace.s_bitmap,
 					   partition, goal, err);
-	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
+	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
 		return udf_table_new_block(sb, inode,
-					   UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
+					   map->s_fspace.s_table,
 					   partition, goal, err);
 	} else {
 		*err = -EIO;