| author | Matthew Wilcox <willy@infradead.org> | 2018-03-05 22:46:03 -0500 |
|---|---|---|
| committer | Matthew Wilcox <willy@infradead.org> | 2018-10-21 10:46:33 -0400 |
| commit | eb797a8ee0ab4cd03df556980ce7bf167cadaa50 (patch) | |
| tree | e7c81b546b56103adf4ea9d15965034c10c52763 /include/linux/fs.h | |
| parent | f32f004cddf86d63a9c42994bbce9f4e2f07c9fa (diff) | |
page cache: Rearrange address_space
Change i_pages from a radix_tree_root to an xarray, convert the
documentation into kernel-doc format and change the order of the elements
to pack them better on 64-bit systems.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
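To make the i_pages conversion concrete, here is a minimal, hypothetical sketch (not taken from this commit) of how a caller that previously looked up a page with radix_tree_lookup() on i_pages could use the XArray API after this change. The helper name example_find_page() is illustrative only; the real page-cache lookup helpers do considerably more (locking, refcounting, shadow-entry handling).

```c
/*
 * Hypothetical sketch, not part of this commit: with i_pages as an
 * XArray, a lookup that previously went through
 * radix_tree_lookup(&mapping->i_pages, index) can use xa_load()
 * instead.  Locking, refcounting and shadow-entry handling are elided.
 */
#include <linux/fs.h>
#include <linux/xarray.h>

static struct page *example_find_page(struct address_space *mapping,
                                      pgoff_t index)
{
	/* xa_load() is RCU-safe and may return NULL or a value entry. */
	return xa_load(&mapping->i_pages, index);
}
```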
Diffstat (limited to 'include/linux/fs.h')
-rw-r--r-- | include/linux/fs.h | 46
1 file changed, 31 insertions(+), 15 deletions(-)
```diff
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6c0b4a1c22ff..d126cad0f621 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -403,24 +403,40 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
                         loff_t pos, unsigned len, unsigned copied,
                         struct page *page, void *fsdata);
 
+/**
+ * struct address_space - Contents of a cacheable, mappable object.
+ * @host: Owner, either the inode or the block_device.
+ * @i_pages: Cached pages.
+ * @gfp_mask: Memory allocation flags to use for allocating pages.
+ * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @i_mmap: Tree of private and shared mappings.
+ * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
+ * @nrpages: Number of page entries, protected by the i_pages lock.
+ * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
+ * @writeback_index: Writeback starts here.
+ * @a_ops: Methods.
+ * @flags: Error bits and flags (AS_*).
+ * @wb_err: The most recent error which has occurred.
+ * @private_lock: For use by the owner of the address_space.
+ * @private_list: For use by the owner of the address_space.
+ * @private_data: For use by the owner of the address_space.
+ */
 struct address_space {
-        struct inode            *host;          /* owner: inode, block_device */
-        struct radix_tree_root  i_pages;        /* cached pages */
-        atomic_t                i_mmap_writable;/* count VM_SHARED mappings */
-        struct rb_root_cached   i_mmap;         /* tree of private and shared mappings */
-        struct rw_semaphore     i_mmap_rwsem;   /* protect tree, count, list */
-        /* Protected by the i_pages lock */
-        unsigned long           nrpages;        /* number of total pages */
-        /* number of shadow or DAX exceptional entries */
-        unsigned long           nrexceptional;
-        pgoff_t                 writeback_index;/* writeback starts here */
-        const struct address_space_operations *a_ops;  /* methods */
-        unsigned long           flags;          /* error bits */
-        spinlock_t              private_lock;   /* for use by the address_space */
-        gfp_t                   gfp_mask;       /* implicit gfp mask for allocations */
-        struct list_head        private_list;   /* for use by the address_space */
-        void                    *private_data;  /* ditto */
+        struct inode            *host;
+        struct xarray           i_pages;
+        gfp_t                   gfp_mask;
+        atomic_t                i_mmap_writable;
+        struct rb_root_cached   i_mmap;
+        struct rw_semaphore     i_mmap_rwsem;
+        unsigned long           nrpages;
+        unsigned long           nrexceptional;
+        pgoff_t                 writeback_index;
+        const struct address_space_operations *a_ops;
+        unsigned long           flags;
         errseq_t                wb_err;
+        spinlock_t              private_lock;
+        struct list_head        private_list;
+        void                    *private_data;
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
         /*
          * On most architectures that alignment is already the case; but
```
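The commit message also mentions reordering the members to pack the structure better on 64-bit systems. The following standalone, simplified sketch (userspace stand-in types, not the real struct address_space) illustrates the idea: placing two 4-byte members, such as gfp_mask next to i_mmap_writable, lets them share a single 8-byte slot instead of each dragging 4 bytes of padding behind it.

```c
#include <stdio.h>

/* Simplified stand-ins: both 4-byte members model gfp_t and atomic_t. */
struct before {
	void         *owner;          /* 8 bytes */
	unsigned int  mmap_writable;  /* 4 bytes + 4 bytes padding */
	void         *mmap_tree;      /* 8 bytes */
	unsigned int  gfp_mask;       /* 4 bytes + 4 bytes padding */
	void         *private_data;   /* 8 bytes */
};

struct after {
	void         *owner;          /* 8 bytes */
	unsigned int  gfp_mask;       /* 4 bytes ...               */
	unsigned int  mmap_writable;  /* ... + 4 bytes: one slot   */
	void         *mmap_tree;      /* 8 bytes */
	void         *private_data;   /* 8 bytes */
};

int main(void)
{
	/* On a typical LP64 system this prints 40 vs 32 bytes. */
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}
```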