Diffstat (limited to 'fs/udf/balloc.c')
-rw-r--r--  fs/udf/balloc.c | 709
1 files changed, 363 insertions, 346 deletions

diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 4cec91015681..ef48d094dd2b 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -41,18 +41,17 @@ | |||
41 | #define uint(x) xuint(x) | 41 | #define uint(x) xuint(x) |
42 | #define xuint(x) __le ## x | 42 | #define xuint(x) __le ## x |
43 | 43 | ||
44 | static inline int find_next_one_bit (void * addr, int size, int offset) | 44 | static inline int find_next_one_bit(void *addr, int size, int offset) |
45 | { | 45 | { |
46 | uintBPL_t * p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG); | 46 | uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG); |
47 | int result = offset & ~(BITS_PER_LONG-1); | 47 | int result = offset & ~(BITS_PER_LONG - 1); |
48 | unsigned long tmp; | 48 | unsigned long tmp; |
49 | 49 | ||
50 | if (offset >= size) | 50 | if (offset >= size) |
51 | return size; | 51 | return size; |
52 | size -= result; | 52 | size -= result; |
53 | offset &= (BITS_PER_LONG-1); | 53 | offset &= (BITS_PER_LONG - 1); |
54 | if (offset) | 54 | if (offset) { |
55 | { | ||
56 | tmp = leBPL_to_cpup(p++); | 55 | tmp = leBPL_to_cpup(p++); |
57 | tmp &= ~0UL << offset; | 56 | tmp &= ~0UL << offset; |
58 | if (size < BITS_PER_LONG) | 57 | if (size < BITS_PER_LONG) |
@@ -62,8 +61,7 @@ static inline int find_next_one_bit (void * addr, int size, int offset) | |||
62 | size -= BITS_PER_LONG; | 61 | size -= BITS_PER_LONG; |
63 | result += BITS_PER_LONG; | 62 | result += BITS_PER_LONG; |
64 | } | 63 | } |
65 | while (size & ~(BITS_PER_LONG-1)) | 64 | while (size & ~(BITS_PER_LONG - 1)) { |
66 | { | ||
67 | if ((tmp = leBPL_to_cpup(p++))) | 65 | if ((tmp = leBPL_to_cpup(p++))) |
68 | goto found_middle; | 66 | goto found_middle; |
69 | result += BITS_PER_LONG; | 67 | result += BITS_PER_LONG; |
@@ -72,17 +70,18 @@ static inline int find_next_one_bit (void * addr, int size, int offset) | |||
72 | if (!size) | 70 | if (!size) |
73 | return result; | 71 | return result; |
74 | tmp = leBPL_to_cpup(p); | 72 | tmp = leBPL_to_cpup(p); |
75 | found_first: | 73 | found_first: |
76 | tmp &= ~0UL >> (BITS_PER_LONG-size); | 74 | tmp &= ~0UL >> (BITS_PER_LONG - size); |
77 | found_middle: | 75 | found_middle: |
78 | return result + ffz(~tmp); | 76 | return result + ffz(~tmp); |
79 | } | 77 | } |
80 | 78 | ||
81 | #define find_first_one_bit(addr, size)\ | 79 | #define find_first_one_bit(addr, size)\ |
82 | find_next_one_bit((addr), (size), 0) | 80 | find_next_one_bit((addr), (size), 0) |
83 | 81 | ||
84 | static int read_block_bitmap(struct super_block * sb, | 82 | static int read_block_bitmap(struct super_block *sb, |
85 | struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr) | 83 | struct udf_bitmap *bitmap, unsigned int block, |
84 | unsigned long bitmap_nr) | ||
86 | { | 85 | { |
87 | struct buffer_head *bh = NULL; | 86 | struct buffer_head *bh = NULL; |
88 | int retval = 0; | 87 | int retval = 0; |
@@ -92,38 +91,39 @@ static int read_block_bitmap(struct super_block * sb, | |||
92 | loc.partitionReferenceNum = UDF_SB_PARTITION(sb); | 91 | loc.partitionReferenceNum = UDF_SB_PARTITION(sb); |
93 | 92 | ||
94 | bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block)); | 93 | bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block)); |
95 | if (!bh) | 94 | if (!bh) { |
96 | { | ||
97 | retval = -EIO; | 95 | retval = -EIO; |
98 | } | 96 | } |
99 | bitmap->s_block_bitmap[bitmap_nr] = bh; | 97 | bitmap->s_block_bitmap[bitmap_nr] = bh; |
100 | return retval; | 98 | return retval; |
101 | } | 99 | } |
102 | 100 | ||
103 | static int __load_block_bitmap(struct super_block * sb, | 101 | static int __load_block_bitmap(struct super_block *sb, |
104 | struct udf_bitmap *bitmap, unsigned int block_group) | 102 | struct udf_bitmap *bitmap, |
103 | unsigned int block_group) | ||
105 | { | 104 | { |
106 | int retval = 0; | 105 | int retval = 0; |
107 | int nr_groups = bitmap->s_nr_groups; | 106 | int nr_groups = bitmap->s_nr_groups; |
108 | 107 | ||
109 | if (block_group >= nr_groups) | 108 | if (block_group >= nr_groups) { |
110 | { | 109 | udf_debug("block_group (%d) > nr_groups (%d)\n", block_group, |
111 | udf_debug("block_group (%d) > nr_groups (%d)\n", block_group, nr_groups); | 110 | nr_groups); |
112 | } | 111 | } |
113 | 112 | ||
114 | if (bitmap->s_block_bitmap[block_group]) | 113 | if (bitmap->s_block_bitmap[block_group]) |
115 | return block_group; | 114 | return block_group; |
116 | else | 115 | else { |
117 | { | 116 | retval = |
118 | retval = read_block_bitmap(sb, bitmap, block_group, block_group); | 117 | read_block_bitmap(sb, bitmap, block_group, block_group); |
119 | if (retval < 0) | 118 | if (retval < 0) |
120 | return retval; | 119 | return retval; |
121 | return block_group; | 120 | return block_group; |
122 | } | 121 | } |
123 | } | 122 | } |
124 | 123 | ||
125 | static inline int load_block_bitmap(struct super_block * sb, | 124 | static inline int load_block_bitmap(struct super_block *sb, |
126 | struct udf_bitmap *bitmap, unsigned int block_group) | 125 | struct udf_bitmap *bitmap, |
126 | unsigned int block_group) | ||
127 | { | 127 | { |
128 | int slot; | 128 | int slot; |
129 | 129 | ||
@@ -138,13 +138,14 @@ static inline int load_block_bitmap(struct super_block * sb, | |||
138 | return slot; | 138 | return slot; |
139 | } | 139 | } |
140 | 140 | ||
141 | static void udf_bitmap_free_blocks(struct super_block * sb, | 141 | static void udf_bitmap_free_blocks(struct super_block *sb, |
142 | struct inode * inode, | 142 | struct inode *inode, |
143 | struct udf_bitmap *bitmap, | 143 | struct udf_bitmap *bitmap, |
144 | kernel_lb_addr bloc, uint32_t offset, uint32_t count) | 144 | kernel_lb_addr bloc, uint32_t offset, |
145 | uint32_t count) | ||
145 | { | 146 | { |
146 | struct udf_sb_info *sbi = UDF_SB(sb); | 147 | struct udf_sb_info *sbi = UDF_SB(sb); |
147 | struct buffer_head * bh = NULL; | 148 | struct buffer_head *bh = NULL; |
148 | unsigned long block; | 149 | unsigned long block; |
149 | unsigned long block_group; | 150 | unsigned long block_group; |
150 | unsigned long bit; | 151 | unsigned long bit; |
@@ -154,17 +155,22 @@ static void udf_bitmap_free_blocks(struct super_block * sb, | |||
154 | 155 | ||
155 | mutex_lock(&sbi->s_alloc_mutex); | 156 | mutex_lock(&sbi->s_alloc_mutex); |
156 | if (bloc.logicalBlockNum < 0 || | 157 | if (bloc.logicalBlockNum < 0 || |
157 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) | 158 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, |
158 | { | 159 | bloc. |
159 | udf_debug("%d < %d || %d + %d > %d\n", | 160 | partitionReferenceNum)) |
160 | bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, | 161 | { |
161 | UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)); | 162 | udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0, |
163 | bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb, | ||
164 | bloc. | ||
165 | partitionReferenceNum)); | ||
162 | goto error_return; | 166 | goto error_return; |
163 | } | 167 | } |
164 | 168 | ||
165 | block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3); | 169 | block = |
170 | bloc.logicalBlockNum + offset + | ||
171 | (sizeof(struct spaceBitmapDesc) << 3); | ||
166 | 172 | ||
167 | do_more: | 173 | do_more: |
168 | overflow = 0; | 174 | overflow = 0; |
169 | block_group = block >> (sb->s_blocksize_bits + 3); | 175 | block_group = block >> (sb->s_blocksize_bits + 3); |
170 | bit = block % (sb->s_blocksize << 3); | 176 | bit = block % (sb->s_blocksize << 3); |
@@ -172,8 +178,7 @@ do_more: | |||
172 | /* | 178 | /* |
173 | * Check to see if we are freeing blocks across a group boundary. | 179 | * Check to see if we are freeing blocks across a group boundary. |
174 | */ | 180 | */ |
175 | if (bit + count > (sb->s_blocksize << 3)) | 181 | if (bit + count > (sb->s_blocksize << 3)) { |
176 | { | ||
177 | overflow = bit + count - (sb->s_blocksize << 3); | 182 | overflow = bit + count - (sb->s_blocksize << 3); |
178 | count -= overflow; | 183 | count -= overflow; |
179 | } | 184 | } |
@@ -182,32 +187,31 @@ do_more: | |||
182 | goto error_return; | 187 | goto error_return; |
183 | 188 | ||
184 | bh = bitmap->s_block_bitmap[bitmap_nr]; | 189 | bh = bitmap->s_block_bitmap[bitmap_nr]; |
185 | for (i=0; i < count; i++) | 190 | for (i = 0; i < count; i++) { |
186 | { | 191 | if (udf_set_bit(bit + i, bh->b_data)) { |
187 | if (udf_set_bit(bit + i, bh->b_data)) | ||
188 | { | ||
189 | udf_debug("bit %ld already set\n", bit + i); | 192 | udf_debug("bit %ld already set\n", bit + i); |
190 | udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]); | 193 | udf_debug("byte=%2x\n", |
191 | } | 194 | ((char *)bh->b_data)[(bit + i) >> 3]); |
192 | else | 195 | } else { |
193 | { | ||
194 | if (inode) | 196 | if (inode) |
195 | DQUOT_FREE_BLOCK(inode, 1); | 197 | DQUOT_FREE_BLOCK(inode, 1); |
196 | if (UDF_SB_LVIDBH(sb)) | 198 | if (UDF_SB_LVIDBH(sb)) { |
197 | { | 199 | UDF_SB_LVID(sb)-> |
198 | UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] = | 200 | freeSpaceTable[UDF_SB_PARTITION(sb)] = |
199 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+1); | 201 | cpu_to_le32(le32_to_cpu |
202 | (UDF_SB_LVID(sb)-> | ||
203 | freeSpaceTable[UDF_SB_PARTITION | ||
204 | (sb)]) + 1); | ||
200 | } | 205 | } |
201 | } | 206 | } |
202 | } | 207 | } |
203 | mark_buffer_dirty(bh); | 208 | mark_buffer_dirty(bh); |
204 | if (overflow) | 209 | if (overflow) { |
205 | { | ||
206 | block += count; | 210 | block += count; |
207 | count = overflow; | 211 | count = overflow; |
208 | goto do_more; | 212 | goto do_more; |
209 | } | 213 | } |
210 | error_return: | 214 | error_return: |
211 | sb->s_dirt = 1; | 215 | sb->s_dirt = 1; |
212 | if (UDF_SB_LVIDBH(sb)) | 216 | if (UDF_SB_LVIDBH(sb)) |
213 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 217 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
@@ -215,10 +219,11 @@ error_return: | |||
215 | return; | 219 | return; |
216 | } | 220 | } |
217 | 221 | ||
218 | static int udf_bitmap_prealloc_blocks(struct super_block * sb, | 222 | static int udf_bitmap_prealloc_blocks(struct super_block *sb, |
219 | struct inode * inode, | 223 | struct inode *inode, |
220 | struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block, | 224 | struct udf_bitmap *bitmap, |
221 | uint32_t block_count) | 225 | uint16_t partition, uint32_t first_block, |
226 | uint32_t block_count) | ||
222 | { | 227 | { |
223 | struct udf_sb_info *sbi = UDF_SB(sb); | 228 | struct udf_sb_info *sbi = UDF_SB(sb); |
224 | int alloc_count = 0; | 229 | int alloc_count = 0; |
@@ -233,9 +238,10 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb, | |||
233 | if (first_block + block_count > UDF_SB_PARTLEN(sb, partition)) | 238 | if (first_block + block_count > UDF_SB_PARTLEN(sb, partition)) |
234 | block_count = UDF_SB_PARTLEN(sb, partition) - first_block; | 239 | block_count = UDF_SB_PARTLEN(sb, partition) - first_block; |
235 | 240 | ||
236 | repeat: | 241 | repeat: |
237 | nr_groups = (UDF_SB_PARTLEN(sb, partition) + | 242 | nr_groups = (UDF_SB_PARTLEN(sb, partition) + |
238 | (sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8); | 243 | (sizeof(struct spaceBitmapDesc) << 3) + |
244 | (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8); | ||
239 | block = first_block + (sizeof(struct spaceBitmapDesc) << 3); | 245 | block = first_block + (sizeof(struct spaceBitmapDesc) << 3); |
240 | block_group = block >> (sb->s_blocksize_bits + 3); | 246 | block_group = block >> (sb->s_blocksize_bits + 3); |
241 | group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc); | 247 | group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc); |
@@ -247,31 +253,30 @@ repeat: | |||
247 | 253 | ||
248 | bit = block % (sb->s_blocksize << 3); | 254 | bit = block % (sb->s_blocksize << 3); |
249 | 255 | ||
250 | while (bit < (sb->s_blocksize << 3) && block_count > 0) | 256 | while (bit < (sb->s_blocksize << 3) && block_count > 0) { |
251 | { | ||
252 | if (!udf_test_bit(bit, bh->b_data)) | 257 | if (!udf_test_bit(bit, bh->b_data)) |
253 | goto out; | 258 | goto out; |
254 | else if (DQUOT_PREALLOC_BLOCK(inode, 1)) | 259 | else if (DQUOT_PREALLOC_BLOCK(inode, 1)) |
255 | goto out; | 260 | goto out; |
256 | else if (!udf_clear_bit(bit, bh->b_data)) | 261 | else if (!udf_clear_bit(bit, bh->b_data)) { |
257 | { | ||
258 | udf_debug("bit already cleared for block %d\n", bit); | 262 | udf_debug("bit already cleared for block %d\n", bit); |
259 | DQUOT_FREE_BLOCK(inode, 1); | 263 | DQUOT_FREE_BLOCK(inode, 1); |
260 | goto out; | 264 | goto out; |
261 | } | 265 | } |
262 | block_count --; | 266 | block_count--; |
263 | alloc_count ++; | 267 | alloc_count++; |
264 | bit ++; | 268 | bit++; |
265 | block ++; | 269 | block++; |
266 | } | 270 | } |
267 | mark_buffer_dirty(bh); | 271 | mark_buffer_dirty(bh); |
268 | if (block_count > 0) | 272 | if (block_count > 0) |
269 | goto repeat; | 273 | goto repeat; |
270 | out: | 274 | out: |
271 | if (UDF_SB_LVIDBH(sb)) | 275 | if (UDF_SB_LVIDBH(sb)) { |
272 | { | ||
273 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = | 276 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = |
274 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count); | 277 | cpu_to_le32(le32_to_cpu |
278 | (UDF_SB_LVID(sb)->freeSpaceTable[partition]) - | ||
279 | alloc_count); | ||
275 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 280 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
276 | } | 281 | } |
277 | sb->s_dirt = 1; | 282 | sb->s_dirt = 1; |
@@ -279,12 +284,13 @@ out: | |||
279 | return alloc_count; | 284 | return alloc_count; |
280 | } | 285 | } |
281 | 286 | ||
282 | static int udf_bitmap_new_block(struct super_block * sb, | 287 | static int udf_bitmap_new_block(struct super_block *sb, |
283 | struct inode * inode, | 288 | struct inode *inode, |
284 | struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err) | 289 | struct udf_bitmap *bitmap, uint16_t partition, |
290 | uint32_t goal, int *err) | ||
285 | { | 291 | { |
286 | struct udf_sb_info *sbi = UDF_SB(sb); | 292 | struct udf_sb_info *sbi = UDF_SB(sb); |
287 | int newbit, bit=0, block, block_group, group_start; | 293 | int newbit, bit = 0, block, block_group, group_start; |
288 | int end_goal, nr_groups, bitmap_nr, i; | 294 | int end_goal, nr_groups, bitmap_nr, i; |
289 | struct buffer_head *bh = NULL; | 295 | struct buffer_head *bh = NULL; |
290 | char *ptr; | 296 | char *ptr; |
@@ -293,7 +299,7 @@ static int udf_bitmap_new_block(struct super_block * sb, | |||
293 | *err = -ENOSPC; | 299 | *err = -ENOSPC; |
294 | mutex_lock(&sbi->s_alloc_mutex); | 300 | mutex_lock(&sbi->s_alloc_mutex); |
295 | 301 | ||
296 | repeat: | 302 | repeat: |
297 | if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) | 303 | if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) |
298 | goal = 0; | 304 | goal = 0; |
299 | 305 | ||
@@ -306,38 +312,39 @@ repeat: | |||
306 | if (bitmap_nr < 0) | 312 | if (bitmap_nr < 0) |
307 | goto error_return; | 313 | goto error_return; |
308 | bh = bitmap->s_block_bitmap[bitmap_nr]; | 314 | bh = bitmap->s_block_bitmap[bitmap_nr]; |
309 | ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start); | 315 | ptr = |
316 | memscan((char *)bh->b_data + group_start, 0xFF, | ||
317 | sb->s_blocksize - group_start); | ||
310 | 318 | ||
311 | if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) | 319 | if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) { |
312 | { | ||
313 | bit = block % (sb->s_blocksize << 3); | 320 | bit = block % (sb->s_blocksize << 3); |
314 | 321 | ||
315 | if (udf_test_bit(bit, bh->b_data)) | 322 | if (udf_test_bit(bit, bh->b_data)) { |
316 | { | ||
317 | goto got_block; | 323 | goto got_block; |
318 | } | 324 | } |
319 | end_goal = (bit + 63) & ~63; | 325 | end_goal = (bit + 63) & ~63; |
320 | bit = udf_find_next_one_bit(bh->b_data, end_goal, bit); | 326 | bit = udf_find_next_one_bit(bh->b_data, end_goal, bit); |
321 | if (bit < end_goal) | 327 | if (bit < end_goal) |
322 | goto got_block; | 328 | goto got_block; |
323 | ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3)); | 329 | ptr = |
330 | memscan((char *)bh->b_data + (bit >> 3), 0xFF, | ||
331 | sb->s_blocksize - ((bit + 7) >> 3)); | ||
324 | newbit = (ptr - ((char *)bh->b_data)) << 3; | 332 | newbit = (ptr - ((char *)bh->b_data)) << 3; |
325 | if (newbit < sb->s_blocksize << 3) | 333 | if (newbit < sb->s_blocksize << 3) { |
326 | { | ||
327 | bit = newbit; | 334 | bit = newbit; |
328 | goto search_back; | 335 | goto search_back; |
329 | } | 336 | } |
330 | newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit); | 337 | newbit = |
331 | if (newbit < sb->s_blocksize << 3) | 338 | udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, |
332 | { | 339 | bit); |
340 | if (newbit < sb->s_blocksize << 3) { | ||
333 | bit = newbit; | 341 | bit = newbit; |
334 | goto got_block; | 342 | goto got_block; |
335 | } | 343 | } |
336 | } | 344 | } |
337 | 345 | ||
338 | for (i=0; i<(nr_groups*2); i++) | 346 | for (i = 0; i < (nr_groups * 2); i++) { |
339 | { | 347 | block_group++; |
340 | block_group ++; | ||
341 | if (block_group >= nr_groups) | 348 | if (block_group >= nr_groups) |
342 | block_group = 0; | 349 | block_group = 0; |
343 | group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc); | 350 | group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc); |
@@ -346,67 +353,69 @@ repeat: | |||
346 | if (bitmap_nr < 0) | 353 | if (bitmap_nr < 0) |
347 | goto error_return; | 354 | goto error_return; |
348 | bh = bitmap->s_block_bitmap[bitmap_nr]; | 355 | bh = bitmap->s_block_bitmap[bitmap_nr]; |
349 | if (i < nr_groups) | 356 | if (i < nr_groups) { |
350 | { | 357 | ptr = |
351 | ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start); | 358 | memscan((char *)bh->b_data + group_start, 0xFF, |
352 | if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) | 359 | sb->s_blocksize - group_start); |
353 | { | 360 | if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) { |
354 | bit = (ptr - ((char *)bh->b_data)) << 3; | 361 | bit = (ptr - ((char *)bh->b_data)) << 3; |
355 | break; | 362 | break; |
356 | } | 363 | } |
357 | } | 364 | } else { |
358 | else | 365 | bit = |
359 | { | 366 | udf_find_next_one_bit((char *)bh->b_data, |
360 | bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3); | 367 | sb->s_blocksize << 3, |
368 | group_start << 3); | ||
361 | if (bit < sb->s_blocksize << 3) | 369 | if (bit < sb->s_blocksize << 3) |
362 | break; | 370 | break; |
363 | } | 371 | } |
364 | } | 372 | } |
365 | if (i >= (nr_groups*2)) | 373 | if (i >= (nr_groups * 2)) { |
366 | { | ||
367 | mutex_unlock(&sbi->s_alloc_mutex); | 374 | mutex_unlock(&sbi->s_alloc_mutex); |
368 | return newblock; | 375 | return newblock; |
369 | } | 376 | } |
370 | if (bit < sb->s_blocksize << 3) | 377 | if (bit < sb->s_blocksize << 3) |
371 | goto search_back; | 378 | goto search_back; |
372 | else | 379 | else |
373 | bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); | 380 | bit = |
374 | if (bit >= sb->s_blocksize << 3) | 381 | udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, |
375 | { | 382 | group_start << 3); |
383 | if (bit >= sb->s_blocksize << 3) { | ||
376 | mutex_unlock(&sbi->s_alloc_mutex); | 384 | mutex_unlock(&sbi->s_alloc_mutex); |
377 | return 0; | 385 | return 0; |
378 | } | 386 | } |
379 | 387 | ||
380 | search_back: | 388 | search_back: |
381 | for (i=0; i<7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--); | 389 | for (i = 0; |
390 | i < 7 && bit > (group_start << 3) | ||
391 | && udf_test_bit(bit - 1, bh->b_data); i++, bit--) ; | ||
382 | 392 | ||
383 | got_block: | 393 | got_block: |
384 | 394 | ||
385 | /* | 395 | /* |
386 | * Check quota for allocation of this block. | 396 | * Check quota for allocation of this block. |
387 | */ | 397 | */ |
388 | if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) | 398 | if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { |
389 | { | ||
390 | mutex_unlock(&sbi->s_alloc_mutex); | 399 | mutex_unlock(&sbi->s_alloc_mutex); |
391 | *err = -EDQUOT; | 400 | *err = -EDQUOT; |
392 | return 0; | 401 | return 0; |
393 | } | 402 | } |
394 | 403 | ||
395 | newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - | 404 | newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - |
396 | (sizeof(struct spaceBitmapDesc) << 3); | 405 | (sizeof(struct spaceBitmapDesc) << 3); |
397 | 406 | ||
398 | if (!udf_clear_bit(bit, bh->b_data)) | 407 | if (!udf_clear_bit(bit, bh->b_data)) { |
399 | { | ||
400 | udf_debug("bit already cleared for block %d\n", bit); | 408 | udf_debug("bit already cleared for block %d\n", bit); |
401 | goto repeat; | 409 | goto repeat; |
402 | } | 410 | } |
403 | 411 | ||
404 | mark_buffer_dirty(bh); | 412 | mark_buffer_dirty(bh); |
405 | 413 | ||
406 | if (UDF_SB_LVIDBH(sb)) | 414 | if (UDF_SB_LVIDBH(sb)) { |
407 | { | ||
408 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = | 415 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = |
409 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1); | 416 | cpu_to_le32(le32_to_cpu |
417 | (UDF_SB_LVID(sb)->freeSpaceTable[partition]) - | ||
418 | 1); | ||
410 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 419 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
411 | } | 420 | } |
412 | sb->s_dirt = 1; | 421 | sb->s_dirt = 1; |
@@ -414,16 +423,17 @@ got_block: | |||
414 | *err = 0; | 423 | *err = 0; |
415 | return newblock; | 424 | return newblock; |
416 | 425 | ||
417 | error_return: | 426 | error_return: |
418 | *err = -EIO; | 427 | *err = -EIO; |
419 | mutex_unlock(&sbi->s_alloc_mutex); | 428 | mutex_unlock(&sbi->s_alloc_mutex); |
420 | return 0; | 429 | return 0; |
421 | } | 430 | } |
422 | 431 | ||
423 | static void udf_table_free_blocks(struct super_block * sb, | 432 | static void udf_table_free_blocks(struct super_block *sb, |
424 | struct inode * inode, | 433 | struct inode *inode, |
425 | struct inode * table, | 434 | struct inode *table, |
426 | kernel_lb_addr bloc, uint32_t offset, uint32_t count) | 435 | kernel_lb_addr bloc, uint32_t offset, |
436 | uint32_t count) | ||
427 | { | 437 | { |
428 | struct udf_sb_info *sbi = UDF_SB(sb); | 438 | struct udf_sb_info *sbi = UDF_SB(sb); |
429 | uint32_t start, end; | 439 | uint32_t start, end; |
@@ -435,11 +445,14 @@ static void udf_table_free_blocks(struct super_block * sb, | |||
435 | 445 | ||
436 | mutex_lock(&sbi->s_alloc_mutex); | 446 | mutex_lock(&sbi->s_alloc_mutex); |
437 | if (bloc.logicalBlockNum < 0 || | 447 | if (bloc.logicalBlockNum < 0 || |
438 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) | 448 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, |
439 | { | 449 | bloc. |
440 | udf_debug("%d < %d || %d + %d > %d\n", | 450 | partitionReferenceNum)) |
441 | bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, | 451 | { |
442 | UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)); | 452 | udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0, |
453 | bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb, | ||
454 | bloc. | ||
455 | partitionReferenceNum)); | ||
443 | goto error_return; | 456 | goto error_return; |
444 | } | 457 | } |
445 | 458 | ||
@@ -447,10 +460,11 @@ static void udf_table_free_blocks(struct super_block * sb, | |||
447 | but.. oh well */ | 460 | but.. oh well */ |
448 | if (inode) | 461 | if (inode) |
449 | DQUOT_FREE_BLOCK(inode, count); | 462 | DQUOT_FREE_BLOCK(inode, count); |
450 | if (UDF_SB_LVIDBH(sb)) | 463 | if (UDF_SB_LVIDBH(sb)) { |
451 | { | ||
452 | UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] = | 464 | UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] = |
453 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+count); | 465 | cpu_to_le32(le32_to_cpu |
466 | (UDF_SB_LVID(sb)-> | ||
467 | freeSpaceTable[UDF_SB_PARTITION(sb)]) + count); | ||
454 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 468 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
455 | } | 469 | } |
456 | 470 | ||
@@ -463,73 +477,75 @@ static void udf_table_free_blocks(struct super_block * sb, | |||
463 | epos.bh = oepos.bh = NULL; | 477 | epos.bh = oepos.bh = NULL; |
464 | 478 | ||
465 | while (count && (etype = | 479 | while (count && (etype = |
466 | udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) | 480 | udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { |
467 | { | ||
468 | if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == | 481 | if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == |
469 | start)) | 482 | start)) { |
470 | { | 483 | if ((0x3FFFFFFF - elen) < |
471 | if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) | 484 | (count << sb->s_blocksize_bits)) { |
472 | { | 485 | count -= |
473 | count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 486 | ((0x3FFFFFFF - |
474 | start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 487 | elen) >> sb->s_blocksize_bits); |
475 | elen = (etype << 30) | (0x40000000 - sb->s_blocksize); | 488 | start += |
476 | } | 489 | ((0x3FFFFFFF - |
477 | else | 490 | elen) >> sb->s_blocksize_bits); |
478 | { | 491 | elen = |
492 | (etype << 30) | (0x40000000 - | ||
493 | sb->s_blocksize); | ||
494 | } else { | ||
479 | elen = (etype << 30) | | 495 | elen = (etype << 30) | |
480 | (elen + (count << sb->s_blocksize_bits)); | 496 | (elen + (count << sb->s_blocksize_bits)); |
481 | start += count; | 497 | start += count; |
482 | count = 0; | 498 | count = 0; |
483 | } | 499 | } |
484 | udf_write_aext(table, &oepos, eloc, elen, 1); | 500 | udf_write_aext(table, &oepos, eloc, elen, 1); |
485 | } | 501 | } else if (eloc.logicalBlockNum == (end + 1)) { |
486 | else if (eloc.logicalBlockNum == (end + 1)) | 502 | if ((0x3FFFFFFF - elen) < |
487 | { | 503 | (count << sb->s_blocksize_bits)) { |
488 | if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) | 504 | count -= |
489 | { | 505 | ((0x3FFFFFFF - |
490 | count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 506 | elen) >> sb->s_blocksize_bits); |
491 | end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 507 | end -= |
508 | ((0x3FFFFFFF - | ||
509 | elen) >> sb->s_blocksize_bits); | ||
492 | eloc.logicalBlockNum -= | 510 | eloc.logicalBlockNum -= |
493 | ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 511 | ((0x3FFFFFFF - |
494 | elen = (etype << 30) | (0x40000000 - sb->s_blocksize); | 512 | elen) >> sb->s_blocksize_bits); |
495 | } | 513 | elen = |
496 | else | 514 | (etype << 30) | (0x40000000 - |
497 | { | 515 | sb->s_blocksize); |
516 | } else { | ||
498 | eloc.logicalBlockNum = start; | 517 | eloc.logicalBlockNum = start; |
499 | elen = (etype << 30) | | 518 | elen = (etype << 30) | |
500 | (elen + (count << sb->s_blocksize_bits)); | 519 | (elen + (count << sb->s_blocksize_bits)); |
501 | end -= count; | 520 | end -= count; |
502 | count = 0; | 521 | count = 0; |
503 | } | 522 | } |
504 | udf_write_aext(table, &oepos, eloc, elen, 1); | 523 | udf_write_aext(table, &oepos, eloc, elen, 1); |
505 | } | 524 | } |
506 | 525 | ||
507 | if (epos.bh != oepos.bh) | 526 | if (epos.bh != oepos.bh) { |
508 | { | ||
509 | i = -1; | 527 | i = -1; |
510 | oepos.block = epos.block; | 528 | oepos.block = epos.block; |
511 | brelse(oepos.bh); | 529 | brelse(oepos.bh); |
512 | get_bh(epos.bh); | 530 | get_bh(epos.bh); |
513 | oepos.bh = epos.bh; | 531 | oepos.bh = epos.bh; |
514 | oepos.offset = 0; | 532 | oepos.offset = 0; |
515 | } | 533 | } else |
516 | else | ||
517 | oepos.offset = epos.offset; | 534 | oepos.offset = epos.offset; |
518 | } | 535 | } |
519 | 536 | ||
520 | if (count) | 537 | if (count) { |
521 | { | ||
522 | /* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate | 538 | /* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate |
523 | a new block, and since we hold the super block lock already | 539 | a new block, and since we hold the super block lock already |
524 | very bad things would happen :) | 540 | very bad things would happen :) |
525 | 541 | ||
526 | We copy the behavior of udf_add_aext, but instead of | 542 | We copy the behavior of udf_add_aext, but instead of |
527 | trying to allocate a new block close to the existing one, | 543 | trying to allocate a new block close to the existing one, |
528 | we just steal a block from the extent we are trying to add. | 544 | we just steal a block from the extent we are trying to add. |
529 | 545 | ||
530 | It would be nice if the blocks were close together, but it | 546 | It would be nice if the blocks were close together, but it |
531 | isn't required. | 547 | isn't required. |
532 | */ | 548 | */ |
533 | 549 | ||
534 | int adsize; | 550 | int adsize; |
535 | short_ad *sad = NULL; | 551 | short_ad *sad = NULL; |
@@ -537,121 +553,124 @@ static void udf_table_free_blocks(struct super_block * sb, | |||
537 | struct allocExtDesc *aed; | 553 | struct allocExtDesc *aed; |
538 | 554 | ||
539 | eloc.logicalBlockNum = start; | 555 | eloc.logicalBlockNum = start; |
540 | elen = EXT_RECORDED_ALLOCATED | | 556 | elen = EXT_RECORDED_ALLOCATED | (count << sb->s_blocksize_bits); |
541 | (count << sb->s_blocksize_bits); | ||
542 | 557 | ||
543 | if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) | 558 | if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) |
544 | adsize = sizeof(short_ad); | 559 | adsize = sizeof(short_ad); |
545 | else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) | 560 | else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) |
546 | adsize = sizeof(long_ad); | 561 | adsize = sizeof(long_ad); |
547 | else | 562 | else { |
548 | { | ||
549 | brelse(oepos.bh); | 563 | brelse(oepos.bh); |
550 | brelse(epos.bh); | 564 | brelse(epos.bh); |
551 | goto error_return; | 565 | goto error_return; |
552 | } | 566 | } |
553 | 567 | ||
554 | if (epos.offset + (2 * adsize) > sb->s_blocksize) | 568 | if (epos.offset + (2 * adsize) > sb->s_blocksize) { |
555 | { | ||
556 | char *sptr, *dptr; | 569 | char *sptr, *dptr; |
557 | int loffset; | 570 | int loffset; |
558 | 571 | ||
559 | brelse(oepos.bh); | 572 | brelse(oepos.bh); |
560 | oepos = epos; | 573 | oepos = epos; |
561 | 574 | ||
562 | /* Steal a block from the extent being free'd */ | 575 | /* Steal a block from the extent being free'd */ |
563 | epos.block.logicalBlockNum = eloc.logicalBlockNum; | 576 | epos.block.logicalBlockNum = eloc.logicalBlockNum; |
564 | eloc.logicalBlockNum ++; | 577 | eloc.logicalBlockNum++; |
565 | elen -= sb->s_blocksize; | 578 | elen -= sb->s_blocksize; |
566 | 579 | ||
567 | if (!(epos.bh = udf_tread(sb, | 580 | if (!(epos.bh = udf_tread(sb, |
568 | udf_get_lb_pblock(sb, epos.block, 0)))) | 581 | udf_get_lb_pblock(sb, |
569 | { | 582 | epos.block, |
583 | 0)))) { | ||
570 | brelse(oepos.bh); | 584 | brelse(oepos.bh); |
571 | goto error_return; | 585 | goto error_return; |
572 | } | 586 | } |
573 | aed = (struct allocExtDesc *)(epos.bh->b_data); | 587 | aed = (struct allocExtDesc *)(epos.bh->b_data); |
574 | aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum); | 588 | aed->previousAllocExtLocation = |
575 | if (epos.offset + adsize > sb->s_blocksize) | 589 | cpu_to_le32(oepos.block.logicalBlockNum); |
576 | { | 590 | if (epos.offset + adsize > sb->s_blocksize) { |
577 | loffset = epos.offset; | 591 | loffset = epos.offset; |
578 | aed->lengthAllocDescs = cpu_to_le32(adsize); | 592 | aed->lengthAllocDescs = cpu_to_le32(adsize); |
579 | sptr = UDF_I_DATA(inode) + epos.offset - | 593 | sptr = UDF_I_DATA(inode) + epos.offset - |
580 | udf_file_entry_alloc_offset(inode) + | 594 | udf_file_entry_alloc_offset(inode) + |
581 | UDF_I_LENEATTR(inode) - adsize; | 595 | UDF_I_LENEATTR(inode) - adsize; |
582 | dptr = epos.bh->b_data + sizeof(struct allocExtDesc); | 596 | dptr = |
597 | epos.bh->b_data + | ||
598 | sizeof(struct allocExtDesc); | ||
583 | memcpy(dptr, sptr, adsize); | 599 | memcpy(dptr, sptr, adsize); |
584 | epos.offset = sizeof(struct allocExtDesc) + adsize; | 600 | epos.offset = |
585 | } | 601 | sizeof(struct allocExtDesc) + adsize; |
586 | else | 602 | } else { |
587 | { | ||
588 | loffset = epos.offset + adsize; | 603 | loffset = epos.offset + adsize; |
589 | aed->lengthAllocDescs = cpu_to_le32(0); | 604 | aed->lengthAllocDescs = cpu_to_le32(0); |
590 | sptr = oepos.bh->b_data + epos.offset; | 605 | sptr = oepos.bh->b_data + epos.offset; |
591 | epos.offset = sizeof(struct allocExtDesc); | 606 | epos.offset = sizeof(struct allocExtDesc); |
592 | 607 | ||
593 | if (oepos.bh) | 608 | if (oepos.bh) { |
594 | { | 609 | aed = |
595 | aed = (struct allocExtDesc *)oepos.bh->b_data; | 610 | (struct allocExtDesc *)oepos.bh-> |
611 | b_data; | ||
596 | aed->lengthAllocDescs = | 612 | aed->lengthAllocDescs = |
597 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); | 613 | cpu_to_le32(le32_to_cpu |
598 | } | 614 | (aed-> |
599 | else | 615 | lengthAllocDescs) + |
600 | { | 616 | adsize); |
617 | } else { | ||
601 | UDF_I_LENALLOC(table) += adsize; | 618 | UDF_I_LENALLOC(table) += adsize; |
602 | mark_inode_dirty(table); | 619 | mark_inode_dirty(table); |
603 | } | 620 | } |
604 | } | 621 | } |
605 | if (UDF_SB_UDFREV(sb) >= 0x0200) | 622 | if (UDF_SB_UDFREV(sb) >= 0x0200) |
606 | udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1, | 623 | udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, |
607 | epos.block.logicalBlockNum, sizeof(tag)); | 624 | 1, epos.block.logicalBlockNum, |
625 | sizeof(tag)); | ||
608 | else | 626 | else |
609 | udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1, | 627 | udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, |
610 | epos.block.logicalBlockNum, sizeof(tag)); | 628 | 1, epos.block.logicalBlockNum, |
611 | switch (UDF_I_ALLOCTYPE(table)) | 629 | sizeof(tag)); |
612 | { | 630 | switch (UDF_I_ALLOCTYPE(table)) { |
613 | case ICBTAG_FLAG_AD_SHORT: | 631 | case ICBTAG_FLAG_AD_SHORT: |
614 | { | 632 | { |
615 | sad = (short_ad *)sptr; | 633 | sad = (short_ad *) sptr; |
616 | sad->extLength = cpu_to_le32( | 634 | sad->extLength = |
617 | EXT_NEXT_EXTENT_ALLOCDECS | | 635 | cpu_to_le32 |
618 | sb->s_blocksize); | 636 | (EXT_NEXT_EXTENT_ALLOCDECS | sb-> |
619 | sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum); | 637 | s_blocksize); |
638 | sad->extPosition = | ||
639 | cpu_to_le32(epos.block. | ||
640 | logicalBlockNum); | ||
620 | break; | 641 | break; |
621 | } | 642 | } |
622 | case ICBTAG_FLAG_AD_LONG: | 643 | case ICBTAG_FLAG_AD_LONG: |
623 | { | 644 | { |
624 | lad = (long_ad *)sptr; | 645 | lad = (long_ad *) sptr; |
625 | lad->extLength = cpu_to_le32( | 646 | lad->extLength = |
626 | EXT_NEXT_EXTENT_ALLOCDECS | | 647 | cpu_to_le32 |
627 | sb->s_blocksize); | 648 | (EXT_NEXT_EXTENT_ALLOCDECS | sb-> |
628 | lad->extLocation = cpu_to_lelb(epos.block); | 649 | s_blocksize); |
650 | lad->extLocation = | ||
651 | cpu_to_lelb(epos.block); | ||
629 | break; | 652 | break; |
630 | } | 653 | } |
631 | } | 654 | } |
632 | if (oepos.bh) | 655 | if (oepos.bh) { |
633 | { | ||
634 | udf_update_tag(oepos.bh->b_data, loffset); | 656 | udf_update_tag(oepos.bh->b_data, loffset); |
635 | mark_buffer_dirty(oepos.bh); | 657 | mark_buffer_dirty(oepos.bh); |
636 | } | 658 | } else |
637 | else | ||
638 | mark_inode_dirty(table); | 659 | mark_inode_dirty(table); |
639 | } | 660 | } |
640 | 661 | ||
641 | if (elen) /* It's possible that stealing the block emptied the extent */ | 662 | if (elen) { /* It's possible that stealing the block emptied the extent */ |
642 | { | ||
643 | udf_write_aext(table, &epos, eloc, elen, 1); | 663 | udf_write_aext(table, &epos, eloc, elen, 1); |
644 | 664 | ||
645 | if (!epos.bh) | 665 | if (!epos.bh) { |
646 | { | ||
647 | UDF_I_LENALLOC(table) += adsize; | 666 | UDF_I_LENALLOC(table) += adsize; |
648 | mark_inode_dirty(table); | 667 | mark_inode_dirty(table); |
649 | } | 668 | } else { |
650 | else | ||
651 | { | ||
652 | aed = (struct allocExtDesc *)epos.bh->b_data; | 669 | aed = (struct allocExtDesc *)epos.bh->b_data; |
653 | aed->lengthAllocDescs = | 670 | aed->lengthAllocDescs = |
654 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); | 671 | cpu_to_le32(le32_to_cpu |
672 | (aed->lengthAllocDescs) + | ||
673 | adsize); | ||
655 | udf_update_tag(epos.bh->b_data, epos.offset); | 674 | udf_update_tag(epos.bh->b_data, epos.offset); |
656 | mark_buffer_dirty(epos.bh); | 675 | mark_buffer_dirty(epos.bh); |
657 | } | 676 | } |
@@ -661,16 +680,16 @@ static void udf_table_free_blocks(struct super_block * sb, | |||
661 | brelse(epos.bh); | 680 | brelse(epos.bh); |
662 | brelse(oepos.bh); | 681 | brelse(oepos.bh); |
663 | 682 | ||
664 | error_return: | 683 | error_return: |
665 | sb->s_dirt = 1; | 684 | sb->s_dirt = 1; |
666 | mutex_unlock(&sbi->s_alloc_mutex); | 685 | mutex_unlock(&sbi->s_alloc_mutex); |
667 | return; | 686 | return; |
668 | } | 687 | } |
669 | 688 | ||
670 | static int udf_table_prealloc_blocks(struct super_block * sb, | 689 | static int udf_table_prealloc_blocks(struct super_block *sb, |
671 | struct inode * inode, | 690 | struct inode *inode, |
672 | struct inode *table, uint16_t partition, uint32_t first_block, | 691 | struct inode *table, uint16_t partition, |
673 | uint32_t block_count) | 692 | uint32_t first_block, uint32_t block_count) |
674 | { | 693 | { |
675 | struct udf_sb_info *sbi = UDF_SB(sb); | 694 | struct udf_sb_info *sbi = UDF_SB(sb); |
676 | int alloc_count = 0; | 695 | int alloc_count = 0; |
@@ -696,39 +715,46 @@ static int udf_table_prealloc_blocks(struct super_block * sb, | |||
696 | eloc.logicalBlockNum = 0xFFFFFFFF; | 715 | eloc.logicalBlockNum = 0xFFFFFFFF; |
697 | 716 | ||
698 | while (first_block != eloc.logicalBlockNum && (etype = | 717 | while (first_block != eloc.logicalBlockNum && (etype = |
699 | udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) | 718 | udf_next_aext(table, |
700 | { | 719 | &epos, |
720 | &eloc, | ||
721 | &elen, | ||
722 | 1)) != | ||
723 | -1) { | ||
701 | udf_debug("eloc=%d, elen=%d, first_block=%d\n", | 724 | udf_debug("eloc=%d, elen=%d, first_block=%d\n", |
702 | eloc.logicalBlockNum, elen, first_block); | 725 | eloc.logicalBlockNum, elen, first_block); |
703 | ; /* empty loop body */ | 726 | ; /* empty loop body */ |
704 | } | 727 | } |
705 | 728 | ||
706 | if (first_block == eloc.logicalBlockNum) | 729 | if (first_block == eloc.logicalBlockNum) { |
707 | { | ||
708 | epos.offset -= adsize; | 730 | epos.offset -= adsize; |
709 | 731 | ||
710 | alloc_count = (elen >> sb->s_blocksize_bits); | 732 | alloc_count = (elen >> sb->s_blocksize_bits); |
711 | if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) | 733 | if (inode |
734 | && DQUOT_PREALLOC_BLOCK(inode, | ||
735 | alloc_count > | ||
736 | block_count ? block_count : | ||
737 | alloc_count)) | ||
712 | alloc_count = 0; | 738 | alloc_count = 0; |
713 | else if (alloc_count > block_count) | 739 | else if (alloc_count > block_count) { |
714 | { | ||
715 | alloc_count = block_count; | 740 | alloc_count = block_count; |
716 | eloc.logicalBlockNum += alloc_count; | 741 | eloc.logicalBlockNum += alloc_count; |
717 | elen -= (alloc_count << sb->s_blocksize_bits); | 742 | elen -= (alloc_count << sb->s_blocksize_bits); |
718 | udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1); | 743 | udf_write_aext(table, &epos, eloc, (etype << 30) | elen, |
719 | } | 744 | 1); |
720 | else | 745 | } else |
721 | udf_delete_aext(table, epos, eloc, (etype << 30) | elen); | 746 | udf_delete_aext(table, epos, eloc, |
722 | } | 747 | (etype << 30) | elen); |
723 | else | 748 | } else |
724 | alloc_count = 0; | 749 | alloc_count = 0; |
725 | 750 | ||
726 | brelse(epos.bh); | 751 | brelse(epos.bh); |
727 | 752 | ||
728 | if (alloc_count && UDF_SB_LVIDBH(sb)) | 753 | if (alloc_count && UDF_SB_LVIDBH(sb)) { |
729 | { | ||
730 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = | 754 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = |
731 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count); | 755 | cpu_to_le32(le32_to_cpu |
756 | (UDF_SB_LVID(sb)->freeSpaceTable[partition]) - | ||
757 | alloc_count); | ||
732 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 758 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
733 | sb->s_dirt = 1; | 759 | sb->s_dirt = 1; |
734 | } | 760 | } |
@@ -736,9 +762,10 @@ static int udf_table_prealloc_blocks(struct super_block * sb, | |||
736 | return alloc_count; | 762 | return alloc_count; |
737 | } | 763 | } |
738 | 764 | ||
739 | static int udf_table_new_block(struct super_block * sb, | 765 | static int udf_table_new_block(struct super_block *sb, |
740 | struct inode * inode, | 766 | struct inode *inode, |
741 | struct inode *table, uint16_t partition, uint32_t goal, int *err) | 767 | struct inode *table, uint16_t partition, |
768 | uint32_t goal, int *err) | ||
742 | { | 769 | { |
743 | struct udf_sb_info *sbi = UDF_SB(sb); | 770 | struct udf_sb_info *sbi = UDF_SB(sb); |
744 | uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF; | 771 | uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF; |
@@ -765,30 +792,27 @@ static int udf_table_new_block(struct super_block * sb, | |||
765 | we stop. Otherwise we keep going till we run out of extents. | 792 | we stop. Otherwise we keep going till we run out of extents. |
766 | We store the buffer_head, bloc, and extoffset of the current closest | 793 | We store the buffer_head, bloc, and extoffset of the current closest |
767 | match and use that when we are done. | 794 | match and use that when we are done. |
768 | */ | 795 | */ |
769 | epos.offset = sizeof(struct unallocSpaceEntry); | 796 | epos.offset = sizeof(struct unallocSpaceEntry); |
770 | epos.block = UDF_I_LOCATION(table); | 797 | epos.block = UDF_I_LOCATION(table); |
771 | epos.bh = goal_epos.bh = NULL; | 798 | epos.bh = goal_epos.bh = NULL; |
772 | 799 | ||
773 | while (spread && (etype = | 800 | while (spread && (etype = |
774 | udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) | 801 | udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { |
775 | { | 802 | if (goal >= eloc.logicalBlockNum) { |
776 | if (goal >= eloc.logicalBlockNum) | 803 | if (goal < |
777 | { | 804 | eloc.logicalBlockNum + |
778 | if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) | 805 | (elen >> sb->s_blocksize_bits)) |
779 | nspread = 0; | 806 | nspread = 0; |
780 | else | 807 | else |
781 | nspread = goal - eloc.logicalBlockNum - | 808 | nspread = goal - eloc.logicalBlockNum - |
782 | (elen >> sb->s_blocksize_bits); | 809 | (elen >> sb->s_blocksize_bits); |
783 | } | 810 | } else |
784 | else | ||
785 | nspread = eloc.logicalBlockNum - goal; | 811 | nspread = eloc.logicalBlockNum - goal; |
786 | 812 | ||
787 | if (nspread < spread) | 813 | if (nspread < spread) { |
788 | { | ||
789 | spread = nspread; | 814 | spread = nspread; |
790 | if (goal_epos.bh != epos.bh) | 815 | if (goal_epos.bh != epos.bh) { |
791 | { | ||
792 | brelse(goal_epos.bh); | 816 | brelse(goal_epos.bh); |
793 | goal_epos.bh = epos.bh; | 817 | goal_epos.bh = epos.bh; |
794 | get_bh(goal_epos.bh); | 818 | get_bh(goal_epos.bh); |
@@ -802,8 +826,7 @@ static int udf_table_new_block(struct super_block * sb, | |||
802 | 826 | ||
803 | brelse(epos.bh); | 827 | brelse(epos.bh); |
804 | 828 | ||
805 | if (spread == 0xFFFFFFFF) | 829 | if (spread == 0xFFFFFFFF) { |
806 | { | ||
807 | brelse(goal_epos.bh); | 830 | brelse(goal_epos.bh); |
808 | mutex_unlock(&sbi->s_alloc_mutex); | 831 | mutex_unlock(&sbi->s_alloc_mutex); |
809 | return 0; | 832 | return 0; |
@@ -815,11 +838,10 @@ static int udf_table_new_block(struct super_block * sb, | |||
815 | /* This works, but very poorly.... */ | 838 | /* This works, but very poorly.... */ |
816 | 839 | ||
817 | newblock = goal_eloc.logicalBlockNum; | 840 | newblock = goal_eloc.logicalBlockNum; |
818 | goal_eloc.logicalBlockNum ++; | 841 | goal_eloc.logicalBlockNum++; |
819 | goal_elen -= sb->s_blocksize; | 842 | goal_elen -= sb->s_blocksize; |
820 | 843 | ||
821 | if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) | 844 | if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { |
822 | { | ||
823 | brelse(goal_epos.bh); | 845 | brelse(goal_epos.bh); |
824 | mutex_unlock(&sbi->s_alloc_mutex); | 846 | mutex_unlock(&sbi->s_alloc_mutex); |
825 | *err = -EDQUOT; | 847 | *err = -EDQUOT; |
@@ -832,10 +854,11 @@ static int udf_table_new_block(struct super_block * sb, | |||
832 | udf_delete_aext(table, goal_epos, goal_eloc, goal_elen); | 854 | udf_delete_aext(table, goal_epos, goal_eloc, goal_elen); |
833 | brelse(goal_epos.bh); | 855 | brelse(goal_epos.bh); |
834 | 856 | ||
835 | if (UDF_SB_LVIDBH(sb)) | 857 | if (UDF_SB_LVIDBH(sb)) { |
836 | { | ||
837 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = | 858 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = |
838 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1); | 859 | cpu_to_le32(le32_to_cpu |
860 | (UDF_SB_LVID(sb)->freeSpaceTable[partition]) - | ||
861 | 1); | ||
839 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 862 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); |
840 | } | 863 | } |
841 | 864 | ||
@@ -845,105 +868,99 @@ static int udf_table_new_block(struct super_block * sb, | |||
845 | return newblock; | 868 | return newblock; |
846 | } | 869 | } |
847 | 870 | ||
848 | inline void udf_free_blocks(struct super_block * sb, | 871 | inline void udf_free_blocks(struct super_block *sb, |
849 | struct inode * inode, | 872 | struct inode *inode, |
850 | kernel_lb_addr bloc, uint32_t offset, uint32_t count) | 873 | kernel_lb_addr bloc, uint32_t offset, |
874 | uint32_t count) | ||
851 | { | 875 | { |
852 | uint16_t partition = bloc.partitionReferenceNum; | 876 | uint16_t partition = bloc.partitionReferenceNum; |
853 | 877 | ||
854 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) | 878 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { |
855 | { | ||
856 | return udf_bitmap_free_blocks(sb, inode, | 879 | return udf_bitmap_free_blocks(sb, inode, |
857 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, | 880 | UDF_SB_PARTMAPS(sb)[partition]. |
858 | bloc, offset, count); | 881 | s_uspace.s_bitmap, bloc, offset, |
859 | } | 882 | count); |
860 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) | 883 | } else if (UDF_SB_PARTFLAGS(sb, partition) & |
861 | { | 884 | UDF_PART_FLAG_UNALLOC_TABLE) { |
862 | return udf_table_free_blocks(sb, inode, | 885 | return udf_table_free_blocks(sb, inode, |
863 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, | 886 | UDF_SB_PARTMAPS(sb)[partition]. |
864 | bloc, offset, count); | 887 | s_uspace.s_table, bloc, offset, |
865 | } | 888 | count); |
866 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) | 889 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { |
867 | { | ||
868 | return udf_bitmap_free_blocks(sb, inode, | 890 | return udf_bitmap_free_blocks(sb, inode, |
869 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, | 891 | UDF_SB_PARTMAPS(sb)[partition]. |
870 | bloc, offset, count); | 892 | s_fspace.s_bitmap, bloc, offset, |
871 | } | 893 | count); |
872 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) | 894 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { |
873 | { | ||
874 | return udf_table_free_blocks(sb, inode, | 895 | return udf_table_free_blocks(sb, inode, |
875 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, | 896 | UDF_SB_PARTMAPS(sb)[partition]. |
876 | bloc, offset, count); | 897 | s_fspace.s_table, bloc, offset, |
877 | } | 898 | count); |
878 | else | 899 | } else |
879 | return; | 900 | return; |
880 | } | 901 | } |
881 | 902 | ||
882 | inline int udf_prealloc_blocks(struct super_block * sb, | 903 | inline int udf_prealloc_blocks(struct super_block *sb, |
883 | struct inode * inode, | 904 | struct inode *inode, |
884 | uint16_t partition, uint32_t first_block, uint32_t block_count) | 905 | uint16_t partition, uint32_t first_block, |
906 | uint32_t block_count) | ||
885 | { | 907 | { |
886 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) | 908 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { |
887 | { | ||
888 | return udf_bitmap_prealloc_blocks(sb, inode, | 909 | return udf_bitmap_prealloc_blocks(sb, inode, |
889 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, | 910 | UDF_SB_PARTMAPS(sb) |
890 | partition, first_block, block_count); | 911 | [partition].s_uspace.s_bitmap, |
891 | } | 912 | partition, first_block, |
892 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) | 913 | block_count); |
893 | { | 914 | } else if (UDF_SB_PARTFLAGS(sb, partition) & |
915 | UDF_PART_FLAG_UNALLOC_TABLE) { | ||
894 | return udf_table_prealloc_blocks(sb, inode, | 916 | return udf_table_prealloc_blocks(sb, inode, |
895 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, | 917 | UDF_SB_PARTMAPS(sb)[partition]. |
896 | partition, first_block, block_count); | 918 | s_uspace.s_table, partition, |
897 | } | 919 | first_block, block_count); |
898 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) | 920 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { |
899 | { | ||
900 | return udf_bitmap_prealloc_blocks(sb, inode, | 921 | return udf_bitmap_prealloc_blocks(sb, inode, |
901 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, | 922 | UDF_SB_PARTMAPS(sb) |
902 | partition, first_block, block_count); | 923 | [partition].s_fspace.s_bitmap, |
903 | } | 924 | partition, first_block, |
904 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) | 925 | block_count); |
905 | { | 926 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { |
906 | return udf_table_prealloc_blocks(sb, inode, | 927 | return udf_table_prealloc_blocks(sb, inode, |
907 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, | 928 | UDF_SB_PARTMAPS(sb)[partition]. |
908 | partition, first_block, block_count); | 929 | s_fspace.s_table, partition, |
909 | } | 930 | first_block, block_count); |
910 | else | 931 | } else |
911 | return 0; | 932 | return 0; |
912 | } | 933 | } |
913 | 934 | ||
914 | inline int udf_new_block(struct super_block * sb, | 935 | inline int udf_new_block(struct super_block *sb, |
915 | struct inode * inode, | 936 | struct inode *inode, |
916 | uint16_t partition, uint32_t goal, int *err) | 937 | uint16_t partition, uint32_t goal, int *err) |
917 | { | 938 | { |
918 | int ret; | 939 | int ret; |
919 | 940 | ||
920 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) | 941 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { |
921 | { | ||
922 | ret = udf_bitmap_new_block(sb, inode, | 942 | ret = udf_bitmap_new_block(sb, inode, |
923 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, | 943 | UDF_SB_PARTMAPS(sb)[partition]. |
924 | partition, goal, err); | 944 | s_uspace.s_bitmap, partition, goal, |
945 | err); | ||
925 | return ret; | 946 | return ret; |
926 | } | 947 | } else if (UDF_SB_PARTFLAGS(sb, partition) & |
927 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) | 948 | UDF_PART_FLAG_UNALLOC_TABLE) { |
928 | { | ||
929 | return udf_table_new_block(sb, inode, | 949 | return udf_table_new_block(sb, inode, |
930 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, | 950 | UDF_SB_PARTMAPS(sb)[partition]. |
931 | partition, goal, err); | 951 | s_uspace.s_table, partition, goal, |
932 | } | 952 | err); |
933 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) | 953 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { |
934 | { | ||
935 | return udf_bitmap_new_block(sb, inode, | 954 | return udf_bitmap_new_block(sb, inode, |
936 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, | 955 | UDF_SB_PARTMAPS(sb)[partition]. |
937 | partition, goal, err); | 956 | s_fspace.s_bitmap, partition, goal, |
938 | } | 957 | err); |
939 | else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) | 958 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { |
940 | { | ||
941 | return udf_table_new_block(sb, inode, | 959 | return udf_table_new_block(sb, inode, |
942 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, | 960 | UDF_SB_PARTMAPS(sb)[partition]. |
943 | partition, goal, err); | 961 | s_fspace.s_table, partition, goal, |
944 | } | 962 | err); |
945 | else | 963 | } else { |
946 | { | ||
947 | *err = -EIO; | 964 | *err = -EIO; |
948 | return 0; | 965 | return 0; |
949 | } | 966 | } |
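
For reference, here is a minimal userspace sketch of the next-set-bit scan implemented by find_next_one_bit() at the top of this file. It is not the kernel code: plain unsigned long words stand in for uintBPL_t and leBPL_to_cpup() (so it assumes a little-endian host and skips byte-swapping), __builtin_ctzl() replaces ffz(~tmp), and an explicit empty-tail check is added. The name next_one_bit() and the small driver in main() are illustrative only.

/*
 * Userspace sketch of find_next_one_bit(): return the index of the next
 * set bit at or after 'offset' in a bitmap of 'size' bits, or 'size' if
 * none is found.  Assumes a little-endian host (no byte-swapping).
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static int next_one_bit(const unsigned long *addr, int size, int offset)
{
	const unsigned long *p = addr + offset / BITS_PER_LONG;
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= BITS_PER_LONG - 1;
	if (offset) {
		/* Mask off bits below 'offset' in the first partial word. */
		tmp = *p++ & (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	/* Scan whole words. */
	while (size & ~(BITS_PER_LONG - 1)) {
		if ((tmp = *p++))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
	if (!tmp)		/* no set bit in the final partial word */
		return result + size;
found_middle:
	return result + __builtin_ctzl(tmp);	/* stands in for ffz(~tmp) */
}

int main(void)
{
	unsigned long map[2] = { 0 };

	map[1] |= 1UL << 3;	/* bit 67 on a 64-bit host */
	printf("next one bit from 0: %d\n",
	       next_one_bit(map, (int)(2 * BITS_PER_LONG), 0));
	return 0;
}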