Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c | 20 +++++++-------------
1 file changed, 7 insertions, 13 deletions
@@ -165,15 +165,6 @@ static struct vfsmount *aio_mnt;
 static const struct file_operations aio_ring_fops;
 static const struct address_space_operations aio_ctx_aops;
 
-/* Backing dev info for aio fs.
- * -no dirty page accounting or writeback happens
- */
-static struct backing_dev_info aio_fs_backing_dev_info = {
-	.name		= "aiofs",
-	.state		= 0,
-	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
-};
-
 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 {
 	struct qstr this = QSTR_INIT("[aio]", 5);
@@ -185,7 +176,6 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
 
 	inode->i_mapping->a_ops = &aio_ctx_aops;
 	inode->i_mapping->private_data = ctx;
-	inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
 	inode->i_size = PAGE_SIZE * nr_pages;
 
 	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -230,9 +220,6 @@ static int __init aio_setup(void)
 	if (IS_ERR(aio_mnt))
 		panic("Failed to create aio fs mount.");
 
-	if (bdi_init(&aio_fs_backing_dev_info))
-		panic("Failed to init aio fs backing dev info.");
-
 	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
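Taken together, the three hunks above strip the private backing_dev_info that fs/aio.c carried for its ring mapping. The following is a consolidated sketch of that removed boilerplate, assembled only from the deleted lines above (it shows the pre-change state, not new code; where the ring mapping's BDI comes from after this patch is not shown in this diff and is assumed here to be handled generically by the core):

	/* Pre-change sketch, assembled from the deleted hunks above. */
	static struct backing_dev_info aio_fs_backing_dev_info = {
		.name		= "aiofs",
		.state		= 0,
		/* no dirty page accounting or writeback for the aio ring pages */
		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
	};

	/* in aio_setup(): the BDI had to be registered once at boot */
	if (bdi_init(&aio_fs_backing_dev_info))
		panic("Failed to init aio fs backing dev info.");

	/* in aio_private_file(): and attached to every ring inode's mapping */
	inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;

Dropping all three pieces removes one more per-filesystem BDI user from fs/aio.c.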
@@ -1140,6 +1127,13 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	long ret = 0;
 	int copy_ret;
 
+	/*
+	 * The mutex can block and wake us up and that will cause
+	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
+	 * and repeat. This should be rare enough that it doesn't cause
+	 * performance issues. See the comment in read_events() for more detail.
+	 */
+	sched_annotate_sleep();
 	mutex_lock(&ctx->ring_lock);
 
 	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
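For context, the comment added in the last hunk refers to the caller, read_events(). Roughly, events are harvested inside the condition expression of wait_event_interruptible_hrtimeout(), so the ring_lock mutex taken by aio_read_events_ring() can sleep while the task is already queued on ctx->wait in TASK_INTERRUPTIBLE — the nested-sleep pattern the scheduler's debug check warns about and that sched_annotate_sleep() marks as intentional. The sketch below is simplified and abridged (the real function takes a user timespec and converts it to the 'until' deadline); see fs/aio.c in the same tree for the actual code:

	/* Simplified sketch of the wait loop in read_events(); abridged. */
	static long read_events(struct kioctx *ctx, long min_nr, long nr,
				struct io_event __user *event, ktime_t until)
	{
		long ret = 0;

		/*
		 * aio_read_events() is evaluated as the wake-up condition and
		 * ends up in aio_read_events_ring(), which takes ctx->ring_lock.
		 * If that mutex_lock() blocks, the task sleeps while it is
		 * nominally waiting on ctx->wait; the sched_annotate_sleep()
		 * added above tells the debug code this nested sleep is expected.
		 */
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),
				until);
		return ret;
	}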