author      Patrick McHardy <kaber@trash.net>        2010-04-20 10:02:01 -0400
committer   Patrick McHardy <kaber@trash.net>        2010-04-20 10:02:01 -0400
commit      62910554656cdcd6b6f84a5154c4155aae4ca231 (patch)
tree        dcf14004f6fd2ef7154362ff948bfeba0f3ea92d /fs/ceph/file.c
parent      22265a5c3c103cf8c50be62e6c90d045eb649e6d (diff)
parent      ab9304717f7624c41927f442e6b6d418b2d8b3e4 (diff)
Merge branch 'master' of /repos/git/net-next-2.6

Conflicts:
        Documentation/feature-removal-schedule.txt
        net/ipv6/netfilter/ip6t_REJECT.c
        net/netfilter/xt_limit.c

Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'fs/ceph/file.c')
-rw-r--r--    fs/ceph/file.c    938
1 files changed, 938 insertions, 0 deletions
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
new file mode 100644
index 000000000000..4add3d5da2c1
--- /dev/null
+++ b/fs/ceph/file.c
@@ -0,0 +1,938 @@
#include "ceph_debug.h"

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
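
/*
 * Rough illustration of how a mode is chosen (see ceph_aio_read and
 * ceph_aio_write below): a client holding the FILE_CACHE/FILE_BUFFER
 * capabilities goes through the buffered generic_file_aio_* path;
 * when the MDS has revoked those caps (e.g. because another client
 * opened the file), reads and writes fall through to the synchronous
 * paths, and O_DIRECT always does.
 */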


/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_client *client = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
        req->r_args.open.preferred = cpu_to_le32(-1);
out:
        return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        struct ceph_file_info *cf;
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
                cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
                cf->next_offset = 2;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
        int err;
        int flags, fmode, wanted;

        if (cf) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&inode->i_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&inode->i_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have any caps.  Update wanted set
         * asynchronously.
         */
        spin_lock(&inode->i_lock);
        if (__ceph_is_any_real_caps(ci)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&inode->i_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&inode->i_lock);
                return ceph_init_file(inode, file, fmode);
        }
        spin_unlock(&inode->i_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = igrab(inode);
        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}


/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
                                struct nameidata *nd, int mode,
                                int locked_dir)
{
        struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct file *file = nd->intent.open.file;
        struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
        struct ceph_mds_request *req;
        int err;
        int flags = nd->intent.open.flags - 1;  /* silly vfs! */

        dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
             dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req))
                return ERR_PTR(PTR_ERR(req));
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        }
        req->r_locked_dir = dir;  /* caller holds dir->i_mutex */
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        dentry = ceph_finish_lookup(req, dentry, err);
        if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        if (!err)
                err = ceph_init_file(req->r_dentry->d_inode, file,
                                     req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("ceph_lookup_open result=%p\n", dentry);
        return dentry;
}

int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *cf = file->private_data;

        dout("release inode %p file %p\n", inode, file);
        ceph_put_fmode(ci, cf->fmode);
        if (cf->last_readdir)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kfree(cf->dir_info);
        dput(cf->dentry);
        kmem_cache_free(ceph_file_cachep, cf);

        /* wake up anyone waiting for caps on this inode */
        wake_up(&ci->i_cap_wq);
        return 0;
}

/*
 * build a vector of user pages
 */
static struct page **get_direct_page_vector(const char __user *data,
                                            int num_pages,
                                            loff_t off, size_t len)
{
        struct page **pages;
        int rc;

        pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        down_read(&current->mm->mmap_sem);
        rc = get_user_pages(current, current->mm, (unsigned long)data,
                            num_pages, 0, 0, pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (rc < 0)
                goto fail;
        return pages;

fail:
        kfree(pages);
        return ERR_PTR(rc);
}

static void put_page_vector(struct page **pages, int num_pages)
{
        int i;

        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);
        kfree(pages);
}

void ceph_release_page_vector(struct page **pages, int num_pages)
{
        int i;

        for (i = 0; i < num_pages; i++)
                __free_pages(pages[i], 0);
        kfree(pages);
}

/*
 * allocate a vector of new pages
 */
static struct page **alloc_page_vector(int num_pages)
{
        struct page **pages;
        int i;

        pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
        if (!pages)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < num_pages; i++) {
                pages[i] = alloc_page(GFP_NOFS);
                if (pages[i] == NULL) {
                        ceph_release_page_vector(pages, i);
                        return ERR_PTR(-ENOMEM);
                }
        }
        return pages;
}
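
/*
 * Worked example of the page math used throughout this file (assuming
 * 4 KiB PAGE_CACHE_SIZE; calc_pages_for() is defined elsewhere in the
 * ceph code): an 8000-byte I/O starting at file offset 1000 touches
 * bytes 1000..8999, i.e. pages 0, 1 and 2 of the vector, so
 * calc_pages_for(1000, 8000) must return 3 even though 8000 < 2*4096.
 */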

/*
 * copy user data into a page vector
 */
static int copy_user_to_page_vector(struct page **pages,
                                    const char __user *data,
                                    loff_t off, size_t len)
{
        int i = 0;
        int po = off & ~PAGE_CACHE_MASK;
        int left = len;
        int l, bad;

        while (left > 0) {
                l = min_t(int, PAGE_CACHE_SIZE-po, left);
                bad = copy_from_user(page_address(pages[i]) + po, data, l);
                if (bad == l)
                        return -EFAULT;
                data += l - bad;
                left -= l - bad;
                po += l - bad;
                if (po == PAGE_CACHE_SIZE) {
                        po = 0;
                        i++;
                }
        }
        return len;
}
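
/*
 * Note on the offset arithmetic above: PAGE_CACHE_MASK masks off the
 * in-page bits, so "off & ~PAGE_CACHE_MASK" is the byte offset within
 * the first page.  E.g. (assuming 4 KiB pages) off = 0x1234 gives
 * po = 0x234, and the first chunk copied is 4096 - 0x234 bytes.
 */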

/*
 * copy user data from a page vector into a user pointer
 */
static int copy_page_vector_to_user(struct page **pages, char __user *data,
                                    loff_t off, size_t len)
{
        int i = 0;
        int po = off & ~PAGE_CACHE_MASK;
        int left = len;
        int l, bad;

        while (left > 0) {
                l = min_t(int, left, PAGE_CACHE_SIZE-po);
                bad = copy_to_user(data, page_address(pages[i]) + po, l);
                if (bad == l)
                        return -EFAULT;
                data += l - bad;
                left -= l - bad;
                if (po) {
                        po += l - bad;
                        if (po == PAGE_CACHE_SIZE)
                                po = 0;
                }
                i++;
        }
        return len;
}

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
static void zero_page_vector_range(int off, int len, struct page **pages)
{
        int i = off >> PAGE_CACHE_SHIFT;

        off &= ~PAGE_CACHE_MASK;

        dout("zero_page_vector_page %u~%u\n", off, len);

        /* leading partial page? */
        if (off) {
                int end = min((int)PAGE_CACHE_SIZE, off + len);
                dout("zeroing %d %p head from %d\n", i, pages[i],
                     (int)off);
                zero_user_segment(pages[i], off, end);
                len -= (end - off);
                i++;
        }
        while (len >= PAGE_CACHE_SIZE) {
                dout("zeroing %d %p len=%d\n", i, pages[i], len);
                zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
                len -= PAGE_CACHE_SIZE;
                i++;
        }
        /* trailing partial page? */
        if (len) {
                dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
                zero_user_segment(pages[i], 0, len);
        }
}
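
/*
 * Illustration (assuming 4 KiB pages): zero_page_vector_range(3000,
 * 6000, pages) zeroes the head [3000, 4096) of page 0 (1096 bytes),
 * all 4096 bytes of page 1, and the tail [0, 808) of page 2, for a
 * total of 6000 bytes.
 */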


/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof)
{
        struct ceph_client *client = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len;
        int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
        int left, pages_left;
        int read;
        struct page **page_pos;
        int ret;
        bool hit_stripe, was_short;

        /*
         * we may need to do multiple reads.  not atomic, unfortunately.
         */
        pos = off;
        left = len;
        page_pos = pages;
        pages_left = num_pages;
        read = 0;

more:
        this_len = left;
        ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left);
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
        if (ret == -ENOENT)
                ret = 0;
        dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

        if (ret > 0) {
                int didpages =
                        ((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

                if (read < pos - off) {
                        dout(" zero gap %llu to %llu\n", off + read, pos);
                        zero_page_vector_range(page_off + read,
                                               pos - off - read, pages);
                }
                pos += ret;
                read = pos - off;
                left -= ret;
                page_pos += didpages;
                pages_left -= didpages;

                /* hit stripe? */
                if (left && hit_stripe)
                        goto more;
        }

        if (was_short) {
                /* was original extent fully inside i_size? */
                if (pos + left <= inode->i_size) {
                        dout("zero tail\n");
                        zero_page_vector_range(page_off + read, len - read,
                                               pages);
                        read = len;
                        goto out;
                }

                /* check i_size */
                *checkeof = 1;
        }

out:
        if (ret >= 0)
                ret = read;
        dout("striped_read returns %d\n", ret);
        return ret;
}
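
/*
 * Striping in a nutshell (illustrative numbers; the actual geometry
 * comes from ci->i_layout): with 4 MB objects, a 1 MB read at offset
 * 3.5 MB spans an object boundary, so ceph_osdc_readpages() clips
 * this_len to 512 KB, hit_stripe is set, and the loop above issues a
 * second read for the remaining 512 KB from the next object.
 */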

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
                              unsigned len, loff_t *poff, int *checkeof)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct page **pages;
        u64 off = *poff;
        int num_pages = calc_pages_for(off, len);
        int ret;

        dout("sync_read on file %p %llu~%u %s\n", file, off, len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (file->f_flags & O_DIRECT) {
                pages = get_direct_page_vector(data, num_pages, off, len);

                /*
                 * flush any page cache pages in this range.  this
                 * will make concurrent normal and O_DIRECT io slow,
                 * but it will at least behave sensibly when they are
                 * in sequence.
                 */
        } else {
                pages = alloc_page_vector(num_pages);
        }
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        ret = filemap_write_and_wait(inode->i_mapping);
        if (ret < 0)
                goto done;

        ret = striped_read(inode, off, len, pages, num_pages, checkeof);

        if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
                ret = copy_page_vector_to_user(pages, data, off, ret);
        if (ret >= 0)
                *poff = off + ret;

done:
        if (file->f_flags & O_DIRECT)
                put_page_vector(pages, num_pages);
        else
                ceph_release_page_vector(pages, num_pages);
        dout("sync_read result %d\n", ret);
        return ret;
}

/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
                              struct ceph_msg *msg)
{
        struct ceph_inode_info *ci = ceph_inode(req->r_inode);

        dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
        spin_lock(&ci->i_unsafe_lock);
        list_del_init(&req->r_unsafe_item);
        spin_unlock(&ci->i_unsafe_lock);
        ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}
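
/*
 * The ACK/ONDISK split above gives a two-phase commit: when
 * ceph_sync_write() below requests an ACK (the non-O_SYNC, non-direct
 * case), the write returns once the OSD has the data in memory, the
 * request sits on ci->i_unsafe_writes, and this callback removes it
 * once the OSD reports the data safely on disk.
 */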

/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
                               size_t left, loff_t *offset)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_client *client = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        struct page **pages;
        int num_pages;
        long long unsigned pos;
        u64 len;
        int written = 0;
        int flags;
        int do_sync = 0;
        int check_caps = 0;
        int ret;
        struct timespec mtime = CURRENT_TIME;

        if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u %s\n", file, *offset,
             (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (file->f_flags & O_APPEND)
                pos = i_size_read(inode);
        else
                pos = *offset;

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
                                            (pos + left) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE;
        if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
                flags |= CEPH_OSD_FLAG_ACK;
        else
                do_sync = 1;

        /*
         * we may need to do multiple writes here if we span an object
         * boundary.  this isn't atomic, unfortunately.  :(
         */
more:
        len = left;
        req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
                                    ceph_vino(inode), pos, &len,
                                    CEPH_OSD_OP_WRITE, flags,
                                    ci->i_snap_realm->cached_context,
                                    do_sync,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    &mtime, false, 2);
        if (IS_ERR(req))
                return PTR_ERR(req);

        num_pages = calc_pages_for(pos, len);

        if (file->f_flags & O_DIRECT) {
                pages = get_direct_page_vector(data, num_pages, pos, len);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                /*
                 * throw out any page cache pages in this range.  this
                 * may block.
                 */
                truncate_inode_pages_range(inode->i_mapping, pos,
                                           (pos + len) | (PAGE_CACHE_SIZE - 1));
        } else {
                pages = alloc_page_vector(num_pages);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }
                ret = copy_user_to_page_vector(pages, data, pos, len);
                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                if ((file->f_flags & O_SYNC) == 0) {
                        /* get a second commit callback */
                        req->r_safe_callback = sync_write_commit;
                        req->r_own_pages = 1;
                }
        }
        req->r_pages = pages;
        req->r_num_pages = num_pages;
        req->r_inode = inode;

        ret = ceph_osdc_start_request(&client->osdc, req, false);
        if (!ret) {
                if (req->r_safe_callback) {
                        /*
                         * Add to inode unsafe list only after we
                         * start_request so that a tid has been assigned.
                         */
                        spin_lock(&ci->i_unsafe_lock);
                        list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
                        spin_unlock(&ci->i_unsafe_lock);
                        ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                }
                ret = ceph_osdc_wait_request(&client->osdc, req);
        }

        if (file->f_flags & O_DIRECT)
                put_page_vector(pages, num_pages);
        else if (file->f_flags & O_SYNC)
                ceph_release_page_vector(pages, num_pages);

out:
        ceph_osdc_put_request(req);
        if (ret == 0) {
                pos += len;
                written += len;
                left -= len;
                if (left)
                        goto more;

                ret = written;
                *offset = pos;
                if (pos > i_size_read(inode))
                        check_caps = ceph_inode_set_size(inode, pos);
                if (check_caps)
                        ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
                                        NULL);
        }
        return ret;
}
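
/*
 * As with reads, ceph_osdc_new_request() clips len to the object
 * boundary (illustrative: with 4 MB objects, a 1 MB write at offset
 * 3.5 MB becomes two requests of 512 KB each), and the more: loop
 * above issues the remainder against the next object.
 */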

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        loff_t *ppos = &iocb->ki_pos;
        size_t len = iov->iov_len;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        void *base = iov->iov_base;
        ssize_t ret;
        int got = 0;
        int checkeof = 0, read = 0;

        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
        __ceph_do_pending_vmtruncate(inode);
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
                            &got, -1);
        if (ret < 0)
                goto out;
        dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)len,
             ceph_cap_string(got));

        if ((got & CEPH_CAP_FILE_CACHE) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (inode->i_sb->s_flags & MS_SYNCHRONOUS))
                /* hmm, this isn't really async... */
                ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
        else
                ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);

        if (checkeof && ret >= 0) {
                int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

                /* hit EOF or hole? */
                if (statret == 0 && *ppos < inode->i_size) {
                        dout("aio_read sync_read hit hole, reading more\n");
                        read += ret;
                        base += ret;
                        len -= ret;
                        checkeof = 0;
                        goto again;
                }
        }
        if (ret >= 0)
                ret += read;

        return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc;
        loff_t endoff = pos + iov->iov_len;
        int got = 0;
        int ret, err;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

retry_snap:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
                return -ENOSPC;
        __ceph_do_pending_vmtruncate(inode);
        dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             inode->i_size);
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
                            &got, endoff);
        if (ret < 0)
                goto out;

        dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));

        if ((got & CEPH_CAP_FILE_BUFFER) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
            (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
                ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
                                      &iocb->ki_pos);
        } else {
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

                if ((ret >= 0 || ret == -EIOCBQUEUED) &&
                    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
                     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
                        err = vfs_fsync_range(file, file->f_path.dentry,
                                              pos, pos + ret - 1, 1);
                        if (err < 0)
                                ret = err;
                }
        }
        if (ret >= 0) {
                spin_lock(&inode->i_lock);
                __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&inode->i_lock);
        }

out:
        dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

        if (ret == -EOLDSNAPC) {
                dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
                     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
                goto retry_snap;
        }

        return ret;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
        switch (origin) {
        case SEEK_END:
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
                        goto out;
                }
                offset += inode->i_size;
                break;
        case SEEK_CUR:
                /*
                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
                 * position-querying operation.  Avoid rewriting the "same"
                 * f_pos value back to the file because a concurrent read(),
                 * write() or lseek() might have altered it
                 */
                if (offset == 0) {
                        offset = file->f_pos;
                        goto out;
                }
                offset += file->f_pos;
                break;
        }

        if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
                offset = -EINVAL;
                goto out;
        }

        /* Special lock needed here? */
        if (offset != file->f_pos) {
                file->f_pos = offset;
                file->f_version = 0;
        }

out:
        mutex_unlock(&inode->i_mutex);
        return offset;
}

const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = ceph_aio_read,
        .aio_write = ceph_aio_write,
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .unlocked_ioctl = ceph_ioctl,
        .compat_ioctl = ceph_ioctl,
};
