| author | Hugh Dickins <hugh@veritas.com> | 2009-03-22 21:41:27 -0400 |
| --- | --- | --- |
| committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-03-24 19:38:26 -0400 |
| commit | 095160aee954688a9bad225952c4bee546541e19 (patch) | |
| tree | 5f2ceb17b0a414da00b7173384efda8c56c1755c | |
| parent | f520360d93cdc37de5d972dac4bf3bdef6a7f6a7 (diff) | |
sysfs: fix some bin_vm_ops errors
Commit 86c9508eb1c0ce5aa07b5cf1d36b60c54efc3d7a
"sysfs: don't block indefinitely for unmapped files" in linux-next
crashes the PowerMac G5 when X starts up. It's caught out by the way
powerpc's pci_mmap of legacy_mem uses shmem_zero_setup(), substituting
a new vma->vm_file whose private_data no longer points to the bin_buffer
(substitution done because some versions of X crash if that mmap fails).
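For orientation, here is a minimal sketch of that pattern. The handler name and the stubbed-out mapping attempt are hypothetical; the real code is powerpc's pci_mmap of legacy_mem. The point is the fallback: shmem_zero_setup() fput()s the original vma->vm_file and installs its own file and vm_ops.

```c
#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/sysfs.h>

/* Hypothetical stand-in for the real attempt to map legacy memory. */
static int map_legacy_region(struct vm_area_struct *vma)
{
        return -ENXIO;          /* pretend the region is unavailable */
}

/*
 * Hypothetical bin_attribute ->mmap handler showing the fallback the
 * commit message describes: if the real mapping cannot be made, fall
 * back to an anonymous shmem mapping so that X servers which crash on
 * a failed mmap keep working.  shmem_zero_setup() fput()s the original
 * vma->vm_file and substitutes a new file (whose private_data is not a
 * bin_buffer) together with shmem's vm_ops.
 */
static int legacy_mem_mmap(struct kobject *kobj, struct bin_attribute *attr,
                           struct vm_area_struct *vma)
{
        int rc = map_legacy_region(vma);

        if (rc)
                return shmem_zero_setup(vma);   /* substitutes vma->vm_file */
        return 0;
}
```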
The fix to this is straightforward: the original vm_file is fput() in
that case, so this mmap won't block sysfs at all; just don't switch
over to bin_vm_ops if vm_file has changed.
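Annotated excerpt of the reworked mmap() from the patch below (a fragment, not standalone code; the comments are added here for orientation):

```c
        rc = attr->mmap(kobj, attr, vma);
        if (rc)
                goto out_put;

        /*
         * The handler substituted a new vm_file (e.g. via
         * shmem_zero_setup()): its own vm_ops stay in place and sysfs
         * stays out of the way, since that mapping cannot block sysfs.
         */
        if (vma->vm_file != file)
                goto out_put;

        rc = -EINVAL;
        if (bb->mmapped && bb->vm_ops != vma->vm_ops)
                goto out_put;   /* re-mmap with different underlying vm_ops */

        rc = 0;
        bb->mmapped = 1;
        bb->vm_ops = vma->vm_ops;       /* cache the underlying vm_ops ...    */
        vma->vm_ops = &bin_vm_ops;      /* ... only then interpose bin_vm_ops */
```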
But more fixes were made before realizing that was the problem:

- It should not be an error if bin_page_mkwrite() finds no underlying
  page_mkwrite(): just report success.
- Check that a file already mmap'ed has the same underlying vm_ops
  _before_ pointing vma->vm_ops at bin_vm_ops.
- If the file being mmap'ed is a shmem/tmpfs file, don't fail the mmap
  on CONFIG_NUMA=y just because shmem's vm_ops has a set_policy and
  get_policy: provide bin_set_policy, bin_get_policy and bin_migrate
  wrappers instead (condensed just below; in full in the diff).
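The three NUMA wrappers share one shape, condensed here with explanatory comments; the full versions are in the diff below:

```c
#ifdef CONFIG_NUMA
static int bin_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct bin_buffer *bb = file->private_data;
        struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
        int ret;

        /* No underlying hook: succeed quietly instead of failing the caller. */
        if (!bb->vm_ops || !bb->vm_ops->set_policy)
                return 0;

        /* Pin the attribute active, as every bin_vm_ops wrapper does. */
        if (!sysfs_get_active_two(attr_sd))
                return -EINVAL;

        ret = bb->vm_ops->set_policy(vma, new); /* e.g. shmem's set_policy */

        sysfs_put_active_two(attr_sd);
        return ret;
}
/* bin_get_policy() and bin_migrate() follow the same pattern. */
#endif
```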
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Eric Biederman <ebiederm@aristanetworks.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r-- | fs/sysfs/bin.c | 89 |
1 file changed, 79 insertions, 10 deletions
```diff
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 96cc2bf6a84e..07703d3ff4a1 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -241,9 +241,12 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct page *page)
         struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
         int ret;
 
-        if (!bb->vm_ops || !bb->vm_ops->page_mkwrite)
+        if (!bb->vm_ops)
                 return -EINVAL;
 
+        if (!bb->vm_ops->page_mkwrite)
+                return 0;
+
         if (!sysfs_get_active_two(attr_sd))
                 return -EINVAL;
 
@@ -273,12 +276,78 @@ static int bin_access(struct vm_area_struct *vma, unsigned long addr,
         return ret;
 }
 
+#ifdef CONFIG_NUMA
+static int bin_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+{
+        struct file *file = vma->vm_file;
+        struct bin_buffer *bb = file->private_data;
+        struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
+        int ret;
+
+        if (!bb->vm_ops || !bb->vm_ops->set_policy)
+                return 0;
+
+        if (!sysfs_get_active_two(attr_sd))
+                return -EINVAL;
+
+        ret = bb->vm_ops->set_policy(vma, new);
+
+        sysfs_put_active_two(attr_sd);
+        return ret;
+}
+
+static struct mempolicy *bin_get_policy(struct vm_area_struct *vma,
+                                        unsigned long addr)
+{
+        struct file *file = vma->vm_file;
+        struct bin_buffer *bb = file->private_data;
+        struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
+        struct mempolicy *pol;
+
+        if (!bb->vm_ops || !bb->vm_ops->get_policy)
+                return vma->vm_policy;
+
+        if (!sysfs_get_active_two(attr_sd))
+                return vma->vm_policy;
+
+        pol = bb->vm_ops->get_policy(vma, addr);
+
+        sysfs_put_active_two(attr_sd);
+        return pol;
+}
+
+static int bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
+                       const nodemask_t *to, unsigned long flags)
+{
+        struct file *file = vma->vm_file;
+        struct bin_buffer *bb = file->private_data;
+        struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
+        int ret;
+
+        if (!bb->vm_ops || !bb->vm_ops->migrate)
+                return 0;
+
+        if (!sysfs_get_active_two(attr_sd))
+                return 0;
+
+        ret = bb->vm_ops->migrate(vma, from, to, flags);
+
+        sysfs_put_active_two(attr_sd);
+        return ret;
+}
+#endif
+
 static struct vm_operations_struct bin_vm_ops = {
         .open = bin_vma_open,
         .close = bin_vma_close,
         .fault = bin_fault,
         .page_mkwrite = bin_page_mkwrite,
         .access = bin_access,
+#ifdef CONFIG_NUMA
+        .set_policy = bin_set_policy,
+        .get_policy = bin_get_policy,
+        .migrate = bin_migrate,
+#endif
 };
 
 static int mmap(struct file *file, struct vm_area_struct *vma)
@@ -287,7 +356,6 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
         struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
         struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
         struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
-        struct vm_operations_struct *vm_ops;
         int rc;
 
         mutex_lock(&bb->mutex);
@@ -302,24 +370,25 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
                 goto out_put;
 
         rc = attr->mmap(kobj, attr, vma);
-        vm_ops = vma->vm_ops;
-        vma->vm_ops = &bin_vm_ops;
         if (rc)
                 goto out_put;
 
-        rc = -EINVAL;
-        if (bb->mmapped && bb->vm_ops != vma->vm_ops)
+        /*
+         * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
+         * to satisfy versions of X which crash if the mmap fails: that
+         * substitutes a new vm_file, and we don't then want bin_vm_ops.
+         */
+        if (vma->vm_file != file)
                 goto out_put;
 
-#ifdef CONFIG_NUMA
         rc = -EINVAL;
-        if (vm_ops && ((vm_ops->set_policy || vm_ops->get_policy || vm_ops->migrate)))
+        if (bb->mmapped && bb->vm_ops != vma->vm_ops)
                 goto out_put;
-#endif
 
         rc = 0;
         bb->mmapped = 1;
-        bb->vm_ops = vm_ops;
+        bb->vm_ops = vma->vm_ops;
+        vma->vm_ops = &bin_vm_ops;
 out_put:
         sysfs_put_active_two(attr_sd);
 out_unlock:
```
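For readers wondering where this path is exercised: a driver exposes a mmap-able binary attribute roughly as follows. This is a minimal sketch, not code from this patch; the attribute name, size, handler and pfn are hypothetical, and error handling is omitted. The ->mmap callback is the attr->mmap() that sysfs's mmap() above calls before deciding whether to interpose bin_vm_ops.

```c
#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/sysfs.h>

/*
 * Hypothetical ->mmap handler: back the attribute with one page of
 * device memory.  Whatever vm_ops (if any) this leaves on the vma is
 * what fs/sysfs/bin.c caches in bb->vm_ops and delegates to.
 */
static int example_bin_mmap(struct kobject *kobj, struct bin_attribute *attr,
                            struct vm_area_struct *vma)
{
        unsigned long pfn = 0x1000;     /* hypothetical page frame number */

        return remap_pfn_range(vma, vma->vm_start, pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static struct bin_attribute example_bin_attr = {
        .attr = { .name = "example_region", .mode = 0600 },
        .size = PAGE_SIZE,
        .mmap = example_bin_mmap,
};

/* Registration, typically done from a driver's probe routine. */
static int example_register(struct kobject *kobj)
{
        return sysfs_create_bin_file(kobj, &example_bin_attr);
}
```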