author    John L. Hammond <john.hammond@intel.com>    2016-03-30 19:48:53 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2016-03-31 00:38:13 -0400
commit    fee6eb5052fc394c6f40316052033cd69ee4ebc5 (patch)
tree      4f6cb3bd9ed664f31f26ba5508355e58defd4af2 /drivers/staging
parent    e0a8144b8c32031d37ff849fc07e6c5646e61198 (diff)
staging/lustre/llite: move vvp_io functions to vvp_io.c
Move all vvp_io related functions from lustre/llite/lcommon_cl.c to the
sole file where they are used: lustre/llite/vvp_io.c.

Signed-off-by: John L. Hammond <john.hammond@intel.com>
Reviewed-on: http://review.whamcloud.com/13376
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5971
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
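[Editorial note] The pattern applied here is a common kernel cleanup: a helper with exactly one consumer moves into that consumer's file, drops to internal (static) linkage, and its declaration leaves the shared header. A minimal, self-contained sketch of the same pattern — names are illustrative, not from the Lustre tree:

    /* Before the move, helper() would live in a common file with an
     * extern declaration in a shared header; after the move it is
     * file-local to its only caller and invisible to the rest of the
     * module. */
    #include <stdio.h>

    static int helper(int x)        /* formerly non-static, now static */
    {
            return 2 * x;
    }

    int main(void)
    {
            printf("%d\n", helper(21));     /* prints 42 */
            return 0;
    }

The diff below applies this to cl_is_normalio(), vvp_io_one_lock_index(), vvp_io_one_lock() and ccc_prep_size() (renamed vvp_prep_size()), among others.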
Diffstat (limited to 'drivers/staging'):

 drivers/staging/lustre/lustre/llite/lcommon_cl.c     | 197 ----------------
 drivers/staging/lustre/lustre/llite/llite_internal.h |   1 -
 drivers/staging/lustre/lustre/llite/vvp_internal.h   |  22 --
 drivers/staging/lustre/lustre/llite/vvp_io.c         | 198 ++++++++++++++++
 4 files changed, 196 insertions(+), 222 deletions(-)
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 630c37142d31..1b1110349312 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -184,192 +184,6 @@ void ccc_global_fini(struct lu_device_type *device_type)
 	lu_kmem_fini(ccc_caches);
 }
 
-static void vvp_object_size_lock(struct cl_object *obj)
-{
-	struct inode *inode = vvp_object_inode(obj);
-
-	ll_inode_size_lock(inode);
-	cl_object_attr_lock(obj);
-}
-
-static void vvp_object_size_unlock(struct cl_object *obj)
-{
-	struct inode *inode = vvp_object_inode(obj);
-
-	cl_object_attr_unlock(obj);
-	ll_inode_size_unlock(inode);
-}
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
-			  __u32 enqflags, enum cl_lock_mode mode,
-			  pgoff_t start, pgoff_t end)
-{
-	struct vvp_io *vio = vvp_env_io(env);
-	struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
-	struct cl_object *obj = io->ci_obj;
-
-	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
-	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
-
-	memset(&vio->vui_link, 0, sizeof(vio->vui_link));
-
-	if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
-		descr->cld_mode = CLM_GROUP;
-		descr->cld_gid = vio->vui_fd->fd_grouplock.cg_gid;
-	} else {
-		descr->cld_mode = mode;
-	}
-	descr->cld_obj = obj;
-	descr->cld_start = start;
-	descr->cld_end = end;
-	descr->cld_enq_flags = enqflags;
-
-	cl_io_lock_add(env, io, &vio->vui_link);
-	return 0;
-}
-
-void vvp_io_update_iov(const struct lu_env *env,
-		       struct vvp_io *vio, struct cl_io *io)
-{
-	size_t size = io->u.ci_rw.crw_count;
-
-	if (!cl_is_normalio(env, io) || !vio->vui_iter)
-		return;
-
-	iov_iter_truncate(vio->vui_iter, size);
-}
-
-int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
-		    __u32 enqflags, enum cl_lock_mode mode,
-		    loff_t start, loff_t end)
-{
-	struct cl_object *obj = io->ci_obj;
-
-	return vvp_io_one_lock_index(env, io, enqflags, mode,
-				     cl_index(obj, start), cl_index(obj, end));
-}
-
-void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
-	CLOBINVRNT(env, ios->cis_io->ci_obj,
-		   vvp_object_invariant(ios->cis_io->ci_obj));
-}
-
-void vvp_io_advance(const struct lu_env *env,
-		    const struct cl_io_slice *ios,
-		    size_t nob)
-{
-	struct vvp_io *vio = cl2vvp_io(env, ios);
-	struct cl_io *io = ios->cis_io;
-	struct cl_object *obj = ios->cis_io->ci_obj;
-
-	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
-	if (!cl_is_normalio(env, io))
-		return;
-
-	iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
-}
-
-/**
- * Helper function that if necessary adjusts file size (inode->i_size), when
- * position at the offset \a pos is accessed. File size can be arbitrary stale
- * on a Lustre client, but client at least knows KMS. If accessed area is
- * inside [0, KMS], set file size to KMS, otherwise glimpse file size.
- *
- * Locking: cl_isize_lock is used to serialize changes to inode size and to
- * protect consistency between inode size and cl_object
- * attributes. cl_object_size_lock() protects consistency between cl_attr's of
- * top-object and sub-objects.
- */
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
-		  struct cl_io *io, loff_t start, size_t count, int *exceed)
-{
-	struct cl_attr *attr = ccc_env_thread_attr(env);
-	struct inode *inode = vvp_object_inode(obj);
-	loff_t pos = start + count - 1;
-	loff_t kms;
-	int result;
-
-	/*
-	 * Consistency guarantees: following possibilities exist for the
-	 * relation between region being accessed and real file size at this
-	 * moment:
-	 *
-	 * (A): the region is completely inside of the file;
-	 *
-	 * (B-x): x bytes of region are inside of the file, the rest is
-	 * outside;
-	 *
-	 * (C): the region is completely outside of the file.
-	 *
-	 * This classification is stable under DLM lock already acquired by
-	 * the caller, because to change the class, other client has to take
-	 * DLM lock conflicting with our lock. Also, any updates to ->i_size
-	 * by other threads on this client are serialized by
-	 * ll_inode_size_lock(). This guarantees that short reads are handled
-	 * correctly in the face of concurrent writes and truncates.
-	 */
-	vvp_object_size_lock(obj);
-	result = cl_object_attr_get(env, obj, attr);
-	if (result == 0) {
-		kms = attr->cat_kms;
-		if (pos > kms) {
-			/*
-			 * A glimpse is necessary to determine whether we
-			 * return a short read (B) or some zeroes at the end
-			 * of the buffer (C)
-			 */
-			vvp_object_size_unlock(obj);
-			result = cl_glimpse_lock(env, io, inode, obj, 0);
-			if (result == 0 && exceed) {
-				/* If objective page index exceed end-of-file
-				 * page index, return directly. Do not expect
-				 * kernel will check such case correctly.
-				 * linux-2.6.18-128.1.1 miss to do that.
-				 * --bug 17336
-				 */
-				loff_t size = i_size_read(inode);
-				loff_t cur_index = start >> PAGE_CACHE_SHIFT;
-				loff_t size_index = (size - 1) >>
-						    PAGE_CACHE_SHIFT;
-
-				if ((size == 0 && cur_index != 0) ||
-				    size_index < cur_index)
-					*exceed = 1;
-			}
-			return result;
-		}
-		/*
-		 * region is within kms and, hence, within real file
-		 * size (A). We need to increase i_size to cover the
-		 * read region so that generic_file_read() will do its
-		 * job, but that doesn't mean the kms size is
-		 * _correct_, it is only the _minimum_ size. If
-		 * someone does a stat they will get the correct size
-		 * which will always be >= the kms value here.
-		 * b=11081
-		 */
-		if (i_size_read(inode) < kms) {
-			i_size_write(inode, kms);
-			CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
-			       PFID(lu_object_fid(&obj->co_lu)),
-			       (__u64)i_size_read(inode));
-		}
-	}
-
-	vvp_object_size_unlock(obj);
-
-	return result;
-}
-
 /*****************************************************************************
  *
  * Transfer operations.
@@ -493,17 +307,6 @@ again:
  *
  */
 
-struct vvp_io *cl2vvp_io(const struct lu_env *env,
-			 const struct cl_io_slice *slice)
-{
-	struct vvp_io *vio;
-
-	vio = container_of(slice, struct vvp_io, vui_cl);
-	LASSERT(vio == vvp_env_io(env));
-
-	return vio;
-}
-
 struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
 {
 	return container_of0(slice, struct ccc_req, crq_cl);
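
[Editorial note] cl2vvp_io(), removed above and re-added in vvp_io.c below, is the usual container_of() downcast: given a pointer to an embedded cl_io_slice, it recovers the enclosing vvp_io. A self-contained userspace sketch of the same idiom, with generic stand-in types rather than the Lustre definitions:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Generic stand-ins for cl_io_slice / struct vvp_io. */
    struct io_slice { int id; };

    struct outer_io {
            int state;
            struct io_slice slice;  /* embedded, like vui_cl in vvp_io */
    };

    /* Recover the enclosing object from a pointer to its embedded
     * member, which is what cl2vvp_io() does with container_of(). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct outer_io *slice2io(struct io_slice *s)
    {
            return container_of(s, struct outer_io, slice);
    }

    int main(void)
    {
            struct outer_io io = { .state = 7 };

            assert(slice2io(&io.slice) == &io);
            printf("state=%d\n", slice2io(&io.slice)->state);
            return 0;
    }

The kernel version adds LASSERT(vio == vvp_env_io(env)) as a sanity check that the slice really belongs to this environment's vvp_io.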
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 9856bb6daf15..86e93c04b12c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -693,7 +693,6 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
 int ll_readahead(const struct lu_env *env, struct cl_io *io,
		 struct cl_page_list *queue, struct ll_readahead_state *ras,
		 bool hit);
-int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
 struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage);
 void ll_cl_fini(struct ll_cl_context *lcc);
 
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index e04f23ea403e..7af8c445be77 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -164,12 +164,6 @@ struct vvp_io {
 	bool vui_ra_valid;
 };
 
-/**
- * True, if \a io is a normal io, False for other splice_{read,write}.
- * must be implemented in arch specific code.
- */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
-
 extern struct lu_context_key ccc_key;
 extern struct lu_context_key vvp_session_key;
 
@@ -334,19 +328,6 @@ void ccc_umount(const struct lu_env *env, struct cl_device *dev);
 int ccc_global_init(struct lu_device_type *device_type);
 void ccc_global_fini(struct lu_device_type *device_type);
 
-int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
-			  __u32 enqflags, enum cl_lock_mode mode,
-			  pgoff_t start, pgoff_t end);
-int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
-		    __u32 enqflags, enum cl_lock_mode mode,
-		    loff_t start, loff_t end);
-void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
-void vvp_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
-		    size_t nob);
-void vvp_io_update_iov(const struct lu_env *env, struct vvp_io *cio,
-		       struct cl_io *io);
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
-		  struct cl_io *io, loff_t start, size_t count, int *exceed);
 void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret);
 void ccc_req_attr_set(const struct lu_env *env,
@@ -397,8 +378,6 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
 	return container_of(slice, struct vvp_lock, vlk_cl);
 }
 
-struct vvp_io *cl2vvp_io(const struct lu_env *env,
-			 const struct cl_io_slice *slice);
 struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice);
 
 int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
@@ -447,6 +426,7 @@ void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
 
 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io);
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
 int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io);
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 53cf2be0e152..48b0693edb1f 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -47,10 +47,21 @@
 #include "llite_internal.h"
 #include "vvp_internal.h"
 
+struct vvp_io *cl2vvp_io(const struct lu_env *env,
+			 const struct cl_io_slice *slice)
+{
+	struct vvp_io *vio;
+
+	vio = container_of(slice, struct vvp_io, vui_cl);
+	LASSERT(vio == vvp_env_io(env));
+
+	return vio;
+}
+
 /**
  * True, if \a io is a normal io, False for splice_{read,write}
  */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
 {
 	struct vvp_io *vio = vvp_env_io(env);
 
@@ -93,12 +104,160 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
 	return rc;
 }
 
+static void vvp_object_size_lock(struct cl_object *obj)
+{
+	struct inode *inode = vvp_object_inode(obj);
+
+	ll_inode_size_lock(inode);
+	cl_object_attr_lock(obj);
+}
+
+static void vvp_object_size_unlock(struct cl_object *obj)
+{
+	struct inode *inode = vvp_object_inode(obj);
+
+	cl_object_attr_unlock(obj);
+	ll_inode_size_unlock(inode);
+}
+
+/**
+ * Helper function that if necessary adjusts file size (inode->i_size), when
+ * position at the offset \a pos is accessed. File size can be arbitrary stale
+ * on a Lustre client, but client at least knows KMS. If accessed area is
+ * inside [0, KMS], set file size to KMS, otherwise glimpse file size.
+ *
+ * Locking: cl_isize_lock is used to serialize changes to inode size and to
+ * protect consistency between inode size and cl_object
+ * attributes. cl_object_size_lock() protects consistency between cl_attr's of
+ * top-object and sub-objects.
+ */
+static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
+			 struct cl_io *io, loff_t start, size_t count,
+			 int *exceed)
+{
+	struct cl_attr *attr = ccc_env_thread_attr(env);
+	struct inode *inode = vvp_object_inode(obj);
+	loff_t pos = start + count - 1;
+	loff_t kms;
+	int result;
+
+	/*
+	 * Consistency guarantees: following possibilities exist for the
+	 * relation between region being accessed and real file size at this
+	 * moment:
+	 *
+	 * (A): the region is completely inside of the file;
+	 *
+	 * (B-x): x bytes of region are inside of the file, the rest is
+	 * outside;
+	 *
+	 * (C): the region is completely outside of the file.
+	 *
+	 * This classification is stable under DLM lock already acquired by
+	 * the caller, because to change the class, other client has to take
+	 * DLM lock conflicting with our lock. Also, any updates to ->i_size
+	 * by other threads on this client are serialized by
+	 * ll_inode_size_lock(). This guarantees that short reads are handled
+	 * correctly in the face of concurrent writes and truncates.
+	 */
+	vvp_object_size_lock(obj);
+	result = cl_object_attr_get(env, obj, attr);
+	if (result == 0) {
+		kms = attr->cat_kms;
+		if (pos > kms) {
+			/*
+			 * A glimpse is necessary to determine whether we
+			 * return a short read (B) or some zeroes at the end
+			 * of the buffer (C)
+			 */
+			vvp_object_size_unlock(obj);
+			result = cl_glimpse_lock(env, io, inode, obj, 0);
+			if (result == 0 && exceed) {
+				/* If objective page index exceed end-of-file
+				 * page index, return directly. Do not expect
+				 * kernel will check such case correctly.
+				 * linux-2.6.18-128.1.1 miss to do that.
+				 * --bug 17336
+				 */
+				loff_t size = i_size_read(inode);
+				loff_t cur_index = start >> PAGE_CACHE_SHIFT;
+				loff_t size_index = (size - 1) >>
+						    PAGE_CACHE_SHIFT;
+
+				if ((size == 0 && cur_index != 0) ||
+				    size_index < cur_index)
+					*exceed = 1;
+			}
+			return result;
+		}
+		/*
+		 * region is within kms and, hence, within real file
+		 * size (A). We need to increase i_size to cover the
+		 * read region so that generic_file_read() will do its
+		 * job, but that doesn't mean the kms size is
+		 * _correct_, it is only the _minimum_ size. If
+		 * someone does a stat they will get the correct size
+		 * which will always be >= the kms value here.
+		 * b=11081
+		 */
+		if (i_size_read(inode) < kms) {
+			i_size_write(inode, kms);
+			CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
+			       PFID(lu_object_fid(&obj->co_lu)),
+			       (__u64)i_size_read(inode));
+		}
+	}
+
+	vvp_object_size_unlock(obj);
+
+	return result;
+}
+
 /*****************************************************************************
  *
  * io operations.
  *
  */
 
+static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+				 __u32 enqflags, enum cl_lock_mode mode,
+				 pgoff_t start, pgoff_t end)
+{
+	struct vvp_io *vio = vvp_env_io(env);
+	struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
+	struct cl_object *obj = io->ci_obj;
+
+	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
+
+	memset(&vio->vui_link, 0, sizeof(vio->vui_link));
+
+	if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+		descr->cld_mode = CLM_GROUP;
+		descr->cld_gid = vio->vui_fd->fd_grouplock.cg_gid;
+	} else {
+		descr->cld_mode = mode;
+	}
+	descr->cld_obj = obj;
+	descr->cld_start = start;
+	descr->cld_end = end;
+	descr->cld_enq_flags = enqflags;
+
+	cl_io_lock_add(env, io, &vio->vui_link);
+	return 0;
+}
+
+static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
+			   __u32 enqflags, enum cl_lock_mode mode,
+			   loff_t start, loff_t end)
+{
+	struct cl_object *obj = io->ci_obj;
+
+	return vvp_io_one_lock_index(env, io, enqflags, mode,
+				     cl_index(obj, start), cl_index(obj, end));
+}
+
 static int vvp_io_write_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
 {
@@ -303,6 +462,33 @@ static int vvp_mmap_locks(const struct lu_env *env,
 	return result;
 }
 
+static void vvp_io_advance(const struct lu_env *env,
+			   const struct cl_io_slice *ios,
+			   size_t nob)
+{
+	struct vvp_io *vio = cl2vvp_io(env, ios);
+	struct cl_io *io = ios->cis_io;
+	struct cl_object *obj = ios->cis_io->ci_obj;
+
+	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+	if (!cl_is_normalio(env, io))
+		return;
+
+	iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
+}
+
+static void vvp_io_update_iov(const struct lu_env *env,
+			      struct vvp_io *vio, struct cl_io *io)
+{
+	size_t size = io->u.ci_rw.crw_count;
+
+	if (!cl_is_normalio(env, io) || !vio->vui_iter)
+		return;
+
+	iov_iter_truncate(vio->vui_iter, size);
+}
+
 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
			  enum cl_lock_mode mode, loff_t start, loff_t end)
 {
@@ -514,7 +700,7 @@ static int vvp_io_read_start(const struct lu_env *env,
 	if (!can_populate_pages(env, io, inode))
		return 0;
 
-	result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
+	result = vvp_prep_size(env, obj, io, pos, tot, &exceed);
 	if (result != 0)
		return result;
 	else if (exceed != 0)
@@ -886,7 +1072,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
 	/* offset of the last byte on the page */
 	offset = cl_offset(obj, fio->ft_index + 1) - 1;
 	LASSERT(cl_index(obj, offset) == fio->ft_index);
-	result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
+	result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
 	if (result != 0)
		return result;
 
@@ -1075,6 +1261,12 @@ static int vvp_io_read_page(const struct lu_env *env,
 	return 0;
 }
 
+void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+	CLOBINVRNT(env, ios->cis_io->ci_obj,
+		   vvp_object_invariant(ios->cis_io->ci_obj));
+}
+
 static const struct cl_io_operations vvp_io_ops = {
 	.op = {
		[CIT_READ] = {
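
[Editorial note] The (A)/(B-x)/(C) classification documented in vvp_prep_size() above reduces to simple interval arithmetic. Below is a hedged userspace model of just the classification step; the real function consults KMS under locks and may issue a glimpse RPC, none of which is modeled here:

    #include <stdio.h>

    /* Classify an accessed region [start, start+count) against the file
     * size, mirroring cases (A), (B-x) and (C) from the vvp_prep_size()
     * comment. All values are byte offsets. */
    static const char *classify(long long start, long long count,
                                long long size)
    {
            long long end = start + count;  /* one past the last byte */

            if (end <= size)
                    return "A: region completely inside the file";
            if (start < size)
                    return "B: region partially inside (short read)";
            return "C: region completely outside the file";
    }

    int main(void)
    {
            printf("%s\n", classify(0, 100, 4096));    /* A */
            printf("%s\n", classify(4000, 200, 4096)); /* B */
            printf("%s\n", classify(8192, 100, 4096)); /* C */
            return 0;
    }

In the kernel code, case (A) lets the client bump i_size to KMS locally, while (B) and (C) force a glimpse lock to learn the authoritative size from the servers.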