author	Peng Tao <bergwolf@gmail.com>	2012-01-12 10:18:41 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2012-01-12 16:38:49 -0500
commit	39e567ae36fe03c2b446e1b83ee3d39bea08f90b (patch)
tree	0f7c3c1ec9dd0fb7a8b36e3ac4818eb870de0580 /fs/nfs
parent	de040beccd52bb5fcac90031505384d037b1111c (diff)
pnfsblock: acquire im_lock in _preload_range
When calling _add_entry, we should take the im_lock to protect against
other modifiers.

Cc: <stable@vger.kernel.org> #3.1+
Signed-off-by: Peng Tao <peng_tao@emc.com>
Signed-off-by: Benny Halevy <bhalevy@tonian.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/blocklayout/extents.c	11
1 file changed, 6 insertions, 5 deletions
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
index 19fa7b0b8c00..c69682a4262a 100644
--- a/fs/nfs/blocklayout/extents.c
+++ b/fs/nfs/blocklayout/extents.c
@@ -139,11 +139,13 @@ static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
 }
 
 /* Ensure that future operations on given range of tree will not malloc */
-static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
+static int _preload_range(struct pnfs_inval_markings *marks,
+		u64 offset, u64 length)
 {
 	u64 start, end, s;
 	int count, i, used = 0, status = -ENOMEM;
 	struct pnfs_inval_tracking **storage;
+	struct my_tree *tree = &marks->im_tree;
 
 	dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
 	start = normalize(offset, tree->mtt_step_size);
@@ -161,12 +163,11 @@ static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
 		goto out_cleanup;
 	}
 
-	/* Now need lock - HOW??? */
-
+	spin_lock(&marks->im_lock);
 	for (s = start; s < end; s += tree->mtt_step_size)
 		used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
+	spin_unlock(&marks->im_lock);
 
-	/* Unlock - HOW??? */
 	status = 0;
 
  out_cleanup:
@@ -286,7 +287,7 @@ int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
 
 	start = normalize(offset, marks->im_block_size);
 	end = normalize_up(offset + length, marks->im_block_size);
-	if (_preload_range(&marks->im_tree, start, end - start))
+	if (_preload_range(marks, start, end - start))
 		goto outerr;
 
 	spin_lock(&marks->im_lock);
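The shape of the fix follows a familiar pattern: perform every allocation that may block before taking the spinlock, then hold im_lock only across the _add_entry() insertions, which use the preallocated storage and do not allocate. As a rough illustration only, the user-space sketch below mimics that shape with a pthread mutex and a linked list standing in for im_lock and the extent tree; every name in it (struct markings, preload_range, and so on) is invented for the example and is not the kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a preallocated tracking entry (not the kernel type). */
struct node {
	unsigned long long sector;
	struct node *next;
};

/* Illustrative stand-in for the per-inode marking state. */
struct markings {
	pthread_mutex_t lock;	/* plays the role of im_lock */
	struct node *head;	/* plays the role of the im_tree extent tree */
};

/*
 * Same shape as the patched _preload_range(): do every allocation that may
 * block before taking the lock, then hold the lock only across insertions
 * that cannot fail and never allocate.
 */
static int preload_range(struct markings *m, unsigned long long start,
			 unsigned long long end, unsigned long long step)
{
	size_t count = (size_t)((end - start + step - 1) / step);
	struct node **storage;
	size_t i, used = 0;
	unsigned long long s;

	/* Preallocate everything outside the lock. */
	storage = calloc(count, sizeof(*storage));
	if (!storage)
		return -1;
	for (i = 0; i < count; i++) {
		storage[i] = malloc(sizeof(*storage[i]));
		if (!storage[i])
			goto out_cleanup;
	}

	/* Critical section: only pointer manipulation under the lock. */
	pthread_mutex_lock(&m->lock);
	for (s = start; s < end; s += step) {
		storage[used]->sector = s;
		storage[used]->next = m->head;
		m->head = storage[used];
		used++;
	}
	pthread_mutex_unlock(&m->lock);

	free(storage);
	return 0;

out_cleanup:
	for (i = 0; i < count && storage[i]; i++)
		free(storage[i]);
	free(storage);
	return -1;
}

int main(void)
{
	struct markings m = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct node *n, *next;

	if (preload_range(&m, 0, 16, 4))
		return 1;
	for (n = m.head; n; n = next) {
		next = n->next;
		printf("preloaded sector %llu\n", n->sector);
		free(n);
	}
	return 0;
}

The design point is the same one the patch makes: nothing inside the locked region may block or allocate, so preallocation has to happen with the lock dropped, and the lock brackets only the insertion loop.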