author    | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-11 19:08:54 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-11 19:08:54 -0400
commit    | c2d95729e3094ecdd8c54e856bbe971adbbd7f48 (patch)
tree      | 76cc5b551227d3d55d68a93105c1fe8080dfb812 /fs
parent    | bbda1baeeb2f4aff3addac3d086a1e56c3f2503e (diff)
parent    | b34081f1cd59585451efaa69e1dff1b9507e6c89 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge first patch-bomb from Andrew Morton:
- Some pidns/fork/exec tweaks
- OCFS2 updates
- Most of MM - there remain quite a few memcg parts which depend on
pending core cgroups changes. Which might have been already merged -
I'll check tomorrow...
- Various misc stuff all over the place
- A few block bits which I never got around to sending to Jens -
relatively minor things.
- MAINTAINERS maintenance
- A small number of lib/ updates
- checkpatch updates
- epoll
- firmware/dmi-scan
- Some kprobes work for S390
- drivers/rtc updates
- hfsplus feature work
- vmcore feature work
- rbtree upgrades
- AOE updates
- pktcdvd cleanups
- PPS
- memstick
- w1
- New "inittmpfs" feature, which does the obvious
- More IPC work from Davidlohr.
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (303 commits)
lz4: fix compression/decompression signedness mismatch
ipc: drop ipc_lock_check
ipc, shm: drop shm_lock_check
ipc: drop ipc_lock_by_ptr
ipc, shm: guard against non-existent vma in shmdt(2)
ipc: document general ipc locking scheme
ipc,msg: drop msg_unlock
ipc: rename ids->rw_mutex
ipc,shm: shorten critical region for shmat
ipc,shm: cleanup do_shmat pasta
ipc,shm: shorten critical region for shmctl
ipc,shm: make shmctl_nolock lockless
ipc,shm: introduce shmctl_nolock
ipc: drop ipcctl_pre_down
ipc,shm: shorten critical region in shmctl_down
ipc,shm: introduce lockless functions to obtain the ipc object
initmpfs: use initramfs if rootfstype= or root= specified
initmpfs: make rootfs use tmpfs when CONFIG_TMPFS enabled
initmpfs: move rootfs code from fs/ramfs/ to init/
initmpfs: move bdi setup from init_rootfs to init_ramfs
...
Diffstat (limited to 'fs')
51 files changed, 874 insertions, 333 deletions
diff --git a/fs/affs/file.c b/fs/affs/file.c index af3261b78102..776e3935a758 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c | |||
@@ -836,7 +836,7 @@ affs_truncate(struct inode *inode) | |||
836 | struct address_space *mapping = inode->i_mapping; | 836 | struct address_space *mapping = inode->i_mapping; |
837 | struct page *page; | 837 | struct page *page; |
838 | void *fsdata; | 838 | void *fsdata; |
839 | u32 size = inode->i_size; | 839 | loff_t size = inode->i_size; |
840 | int res; | 840 | int res; |
841 | 841 | ||
842 | res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata); | 842 | res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata); |
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 8fb42916d8a2..60250847929f 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c | |||
@@ -716,13 +716,14 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size) | |||
716 | return 0; | 716 | return 0; |
717 | 717 | ||
718 | bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab); | 718 | bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab); |
719 | 719 | if (!bs->bio_integrity_pool) | |
720 | bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size); | ||
721 | if (!bs->bvec_integrity_pool) | ||
722 | return -1; | 720 | return -1; |
723 | 721 | ||
724 | if (!bs->bio_integrity_pool) | 722 | bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size); |
723 | if (!bs->bvec_integrity_pool) { | ||
724 | mempool_destroy(bs->bio_integrity_pool); | ||
725 | return -1; | 725 | return -1; |
726 | } | ||
726 | 727 | ||
727 | return 0; | 728 | return 0; |
728 | } | 729 | } |
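The bioset_integrity_create() reordering above is the standard paired-allocation cleanup idiom: allocate the first pool, bail out if that fails, and tear it down again if the second allocation fails. A minimal userspace sketch of the same idiom, with plain malloc/free standing in for the kernel mempool and biovec pools (the struct and function names here are illustrative, not from the patch):

```c
#include <stdlib.h>

struct two_pools {
	void *integrity_pool;
	void *bvec_pool;
};

/* Allocate both pools or neither; mirrors the fixed error path above. */
static int two_pools_create(struct two_pools *p, size_t size)
{
	p->integrity_pool = malloc(size);
	if (!p->integrity_pool)
		return -1;

	p->bvec_pool = malloc(size);
	if (!p->bvec_pool) {
		/* undo the first allocation so nothing leaks on failure */
		free(p->integrity_pool);
		p->integrity_pool = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct two_pools p;

	if (two_pools_create(&p, 64))
		return 1;
	free(p.bvec_pool);
	free(p.integrity_pool);
	return 0;
}
```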
diff --git a/fs/coredump.c b/fs/coredump.c index 72f816d6cad9..9bdeca12ae0e 100644 --- a/fs/coredump.c +++ b/fs/coredump.c | |||
@@ -190,6 +190,11 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm) | |||
190 | err = cn_printf(cn, "%d", | 190 | err = cn_printf(cn, "%d", |
191 | task_tgid_vnr(current)); | 191 | task_tgid_vnr(current)); |
192 | break; | 192 | break; |
193 | /* global pid */ | ||
194 | case 'P': | ||
195 | err = cn_printf(cn, "%d", | ||
196 | task_tgid_nr(current)); | ||
197 | break; | ||
193 | /* uid */ | 198 | /* uid */ |
194 | case 'u': | 199 | case 'u': |
195 | err = cn_printf(cn, "%d", cred->uid); | 200 | err = cn_printf(cn, "%d", cred->uid); |
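The new '%P' specifier above expands to task_tgid_nr(), i.e. the dumping task's TGID as seen from the initial PID namespace, complementing the namespace-local '%p'. A hedged sketch of installing a core pattern that uses it (requires root; the output path is purely illustrative):

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");

	if (!f) {
		perror("core_pattern");
		return 1;
	}
	/* %e = executable name, %P = PID in the initial namespace */
	fprintf(f, "/var/crash/core.%%e.%%P\n");
	fclose(f);
	return 0;
}
```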
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 293f86741ddb..473e09da7d02 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -740,6 +740,7 @@ static void ep_free(struct eventpoll *ep) | |||
740 | epi = rb_entry(rbp, struct epitem, rbn); | 740 | epi = rb_entry(rbp, struct epitem, rbn); |
741 | 741 | ||
742 | ep_unregister_pollwait(ep, epi); | 742 | ep_unregister_pollwait(ep, epi); |
743 | cond_resched(); | ||
743 | } | 744 | } |
744 | 745 | ||
745 | /* | 746 | /* |
@@ -754,6 +755,7 @@ static void ep_free(struct eventpoll *ep) | |||
754 | while ((rbp = rb_first(&ep->rbr)) != NULL) { | 755 | while ((rbp = rb_first(&ep->rbr)) != NULL) { |
755 | epi = rb_entry(rbp, struct epitem, rbn); | 756 | epi = rb_entry(rbp, struct epitem, rbn); |
756 | ep_remove(ep, epi); | 757 | ep_remove(ep, epi); |
758 | cond_resched(); | ||
757 | } | 759 | } |
758 | mutex_unlock(&ep->mtx); | 760 | mutex_unlock(&ep->mtx); |
759 | 761 | ||
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -74,6 +74,8 @@ static DEFINE_RWLOCK(binfmt_lock); | |||
74 | void __register_binfmt(struct linux_binfmt * fmt, int insert) | 74 | void __register_binfmt(struct linux_binfmt * fmt, int insert) |
75 | { | 75 | { |
76 | BUG_ON(!fmt); | 76 | BUG_ON(!fmt); |
77 | if (WARN_ON(!fmt->load_binary)) | ||
78 | return; | ||
77 | write_lock(&binfmt_lock); | 79 | write_lock(&binfmt_lock); |
78 | insert ? list_add(&fmt->lh, &formats) : | 80 | insert ? list_add(&fmt->lh, &formats) : |
79 | list_add_tail(&fmt->lh, &formats); | 81 | list_add_tail(&fmt->lh, &formats); |
@@ -266,7 +268,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm) | |||
266 | BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); | 268 | BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); |
267 | vma->vm_end = STACK_TOP_MAX; | 269 | vma->vm_end = STACK_TOP_MAX; |
268 | vma->vm_start = vma->vm_end - PAGE_SIZE; | 270 | vma->vm_start = vma->vm_end - PAGE_SIZE; |
269 | vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; | 271 | vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; |
270 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); | 272 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); |
271 | INIT_LIST_HEAD(&vma->anon_vma_chain); | 273 | INIT_LIST_HEAD(&vma->anon_vma_chain); |
272 | 274 | ||
@@ -1365,18 +1367,18 @@ out: | |||
1365 | } | 1367 | } |
1366 | EXPORT_SYMBOL(remove_arg_zero); | 1368 | EXPORT_SYMBOL(remove_arg_zero); |
1367 | 1369 | ||
1370 | #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) | ||
1368 | /* | 1371 | /* |
1369 | * cycle the list of binary formats handler, until one recognizes the image | 1372 | * cycle the list of binary formats handler, until one recognizes the image |
1370 | */ | 1373 | */ |
1371 | int search_binary_handler(struct linux_binprm *bprm) | 1374 | int search_binary_handler(struct linux_binprm *bprm) |
1372 | { | 1375 | { |
1373 | unsigned int depth = bprm->recursion_depth; | 1376 | bool need_retry = IS_ENABLED(CONFIG_MODULES); |
1374 | int try,retval; | ||
1375 | struct linux_binfmt *fmt; | 1377 | struct linux_binfmt *fmt; |
1376 | pid_t old_pid, old_vpid; | 1378 | int retval; |
1377 | 1379 | ||
1378 | /* This allows 4 levels of binfmt rewrites before failing hard. */ | 1380 | /* This allows 4 levels of binfmt rewrites before failing hard. */ |
1379 | if (depth > 5) | 1381 | if (bprm->recursion_depth > 5) |
1380 | return -ELOOP; | 1382 | return -ELOOP; |
1381 | 1383 | ||
1382 | retval = security_bprm_check(bprm); | 1384 | retval = security_bprm_check(bprm); |
@@ -1387,71 +1389,67 @@ int search_binary_handler(struct linux_binprm *bprm) | |||
1387 | if (retval) | 1389 | if (retval) |
1388 | return retval; | 1390 | return retval; |
1389 | 1391 | ||
1392 | retval = -ENOENT; | ||
1393 | retry: | ||
1394 | read_lock(&binfmt_lock); | ||
1395 | list_for_each_entry(fmt, &formats, lh) { | ||
1396 | if (!try_module_get(fmt->module)) | ||
1397 | continue; | ||
1398 | read_unlock(&binfmt_lock); | ||
1399 | bprm->recursion_depth++; | ||
1400 | retval = fmt->load_binary(bprm); | ||
1401 | bprm->recursion_depth--; | ||
1402 | if (retval >= 0 || retval != -ENOEXEC || | ||
1403 | bprm->mm == NULL || bprm->file == NULL) { | ||
1404 | put_binfmt(fmt); | ||
1405 | return retval; | ||
1406 | } | ||
1407 | read_lock(&binfmt_lock); | ||
1408 | put_binfmt(fmt); | ||
1409 | } | ||
1410 | read_unlock(&binfmt_lock); | ||
1411 | |||
1412 | if (need_retry && retval == -ENOEXEC) { | ||
1413 | if (printable(bprm->buf[0]) && printable(bprm->buf[1]) && | ||
1414 | printable(bprm->buf[2]) && printable(bprm->buf[3])) | ||
1415 | return retval; | ||
1416 | if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0) | ||
1417 | return retval; | ||
1418 | need_retry = false; | ||
1419 | goto retry; | ||
1420 | } | ||
1421 | |||
1422 | return retval; | ||
1423 | } | ||
1424 | EXPORT_SYMBOL(search_binary_handler); | ||
1425 | |||
1426 | static int exec_binprm(struct linux_binprm *bprm) | ||
1427 | { | ||
1428 | pid_t old_pid, old_vpid; | ||
1429 | int ret; | ||
1430 | |||
1390 | /* Need to fetch pid before load_binary changes it */ | 1431 | /* Need to fetch pid before load_binary changes it */ |
1391 | old_pid = current->pid; | 1432 | old_pid = current->pid; |
1392 | rcu_read_lock(); | 1433 | rcu_read_lock(); |
1393 | old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); | 1434 | old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); |
1394 | rcu_read_unlock(); | 1435 | rcu_read_unlock(); |
1395 | 1436 | ||
1396 | retval = -ENOENT; | 1437 | ret = search_binary_handler(bprm); |
1397 | for (try=0; try<2; try++) { | 1438 | if (ret >= 0) { |
1398 | read_lock(&binfmt_lock); | 1439 | trace_sched_process_exec(current, old_pid, bprm); |
1399 | list_for_each_entry(fmt, &formats, lh) { | 1440 | ptrace_event(PTRACE_EVENT_EXEC, old_vpid); |
1400 | int (*fn)(struct linux_binprm *) = fmt->load_binary; | 1441 | current->did_exec = 1; |
1401 | if (!fn) | 1442 | proc_exec_connector(current); |
1402 | continue; | 1443 | |
1403 | if (!try_module_get(fmt->module)) | 1444 | if (bprm->file) { |
1404 | continue; | 1445 | allow_write_access(bprm->file); |
1405 | read_unlock(&binfmt_lock); | 1446 | fput(bprm->file); |
1406 | bprm->recursion_depth = depth + 1; | 1447 | bprm->file = NULL; /* to catch use-after-free */ |
1407 | retval = fn(bprm); | ||
1408 | bprm->recursion_depth = depth; | ||
1409 | if (retval >= 0) { | ||
1410 | if (depth == 0) { | ||
1411 | trace_sched_process_exec(current, old_pid, bprm); | ||
1412 | ptrace_event(PTRACE_EVENT_EXEC, old_vpid); | ||
1413 | } | ||
1414 | put_binfmt(fmt); | ||
1415 | allow_write_access(bprm->file); | ||
1416 | if (bprm->file) | ||
1417 | fput(bprm->file); | ||
1418 | bprm->file = NULL; | ||
1419 | current->did_exec = 1; | ||
1420 | proc_exec_connector(current); | ||
1421 | return retval; | ||
1422 | } | ||
1423 | read_lock(&binfmt_lock); | ||
1424 | put_binfmt(fmt); | ||
1425 | if (retval != -ENOEXEC || bprm->mm == NULL) | ||
1426 | break; | ||
1427 | if (!bprm->file) { | ||
1428 | read_unlock(&binfmt_lock); | ||
1429 | return retval; | ||
1430 | } | ||
1431 | } | 1448 | } |
1432 | read_unlock(&binfmt_lock); | ||
1433 | #ifdef CONFIG_MODULES | ||
1434 | if (retval != -ENOEXEC || bprm->mm == NULL) { | ||
1435 | break; | ||
1436 | } else { | ||
1437 | #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) | ||
1438 | if (printable(bprm->buf[0]) && | ||
1439 | printable(bprm->buf[1]) && | ||
1440 | printable(bprm->buf[2]) && | ||
1441 | printable(bprm->buf[3])) | ||
1442 | break; /* -ENOEXEC */ | ||
1443 | if (try) | ||
1444 | break; /* -ENOEXEC */ | ||
1445 | request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2])); | ||
1446 | } | ||
1447 | #else | ||
1448 | break; | ||
1449 | #endif | ||
1450 | } | 1449 | } |
1451 | return retval; | ||
1452 | } | ||
1453 | 1450 | ||
1454 | EXPORT_SYMBOL(search_binary_handler); | 1451 | return ret; |
1452 | } | ||
1455 | 1453 | ||
1456 | /* | 1454 | /* |
1457 | * sys_execve() executes a new program. | 1455 | * sys_execve() executes a new program. |
@@ -1541,7 +1539,7 @@ static int do_execve_common(const char *filename, | |||
1541 | if (retval < 0) | 1539 | if (retval < 0) |
1542 | goto out; | 1540 | goto out; |
1543 | 1541 | ||
1544 | retval = search_binary_handler(bprm); | 1542 | retval = exec_binprm(bprm); |
1545 | if (retval < 0) | 1543 | if (retval < 0) |
1546 | goto out; | 1544 | goto out; |
1547 | 1545 | ||
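In the reworked search_binary_handler() above, an unrecognized image triggers at most one request_module("binfmt-%04x", ...) call, keyed on the two bytes at offset 2 of the image read into bprm->buf. A small standalone sketch (a hypothetical helper, not part of the patch) that prints the module alias the kernel would request for a given file:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(int argc, char **argv)
{
	unsigned char buf[4] = {0};
	uint16_t magic;
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <binary>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
		fprintf(stderr, "short read\n");
		fclose(f);
		return 1;
	}
	fclose(f);

	/* same bytes the kernel uses: a native-endian u16 at offset 2 */
	memcpy(&magic, buf + 2, sizeof(magic));
	printf("binfmt-%04x\n", magic);
	return 0;
}
```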
diff --git a/fs/file_table.c b/fs/file_table.c index 322cd37626cb..abdd15ad13c9 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -311,8 +311,7 @@ void fput(struct file *file) | |||
311 | return; | 311 | return; |
312 | /* | 312 | /* |
313 | * After this task has run exit_task_work(), | 313 | * After this task has run exit_task_work(), |
314 | * task_work_add() will fail. free_ipc_ns()-> | 314 | * task_work_add() will fail. Fall through to delayed |
315 | * shm_destroy() can do this. Fall through to delayed | ||
316 | * fput to avoid leaking *file. | 315 | * fput to avoid leaking *file. |
317 | */ | 316 | */ |
318 | } | 317 | } |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 68851ff2fd41..30f6f27d5a59 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -723,7 +723,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb, | |||
723 | return wrote; | 723 | return wrote; |
724 | } | 724 | } |
725 | 725 | ||
726 | long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, | 726 | static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, |
727 | enum wb_reason reason) | 727 | enum wb_reason reason) |
728 | { | 728 | { |
729 | struct wb_writeback_work work = { | 729 | struct wb_writeback_work work = { |
@@ -1049,10 +1049,8 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) | |||
1049 | { | 1049 | { |
1050 | struct backing_dev_info *bdi; | 1050 | struct backing_dev_info *bdi; |
1051 | 1051 | ||
1052 | if (!nr_pages) { | 1052 | if (!nr_pages) |
1053 | nr_pages = global_page_state(NR_FILE_DIRTY) + | 1053 | nr_pages = get_nr_dirty_pages(); |
1054 | global_page_state(NR_UNSTABLE_NFS); | ||
1055 | } | ||
1056 | 1054 | ||
1057 | rcu_read_lock(); | 1055 | rcu_read_lock(); |
1058 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { | 1056 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { |
@@ -1173,6 +1171,8 @@ void __mark_inode_dirty(struct inode *inode, int flags) | |||
1173 | bool wakeup_bdi = false; | 1171 | bool wakeup_bdi = false; |
1174 | bdi = inode_to_bdi(inode); | 1172 | bdi = inode_to_bdi(inode); |
1175 | 1173 | ||
1174 | spin_unlock(&inode->i_lock); | ||
1175 | spin_lock(&bdi->wb.list_lock); | ||
1176 | if (bdi_cap_writeback_dirty(bdi)) { | 1176 | if (bdi_cap_writeback_dirty(bdi)) { |
1177 | WARN(!test_bit(BDI_registered, &bdi->state), | 1177 | WARN(!test_bit(BDI_registered, &bdi->state), |
1178 | "bdi-%s not registered\n", bdi->name); | 1178 | "bdi-%s not registered\n", bdi->name); |
@@ -1187,8 +1187,6 @@ void __mark_inode_dirty(struct inode *inode, int flags) | |||
1187 | wakeup_bdi = true; | 1187 | wakeup_bdi = true; |
1188 | } | 1188 | } |
1189 | 1189 | ||
1190 | spin_unlock(&inode->i_lock); | ||
1191 | spin_lock(&bdi->wb.list_lock); | ||
1192 | inode->dirtied_when = jiffies; | 1190 | inode->dirtied_when = jiffies; |
1193 | list_move(&inode->i_wb_list, &bdi->wb.b_dirty); | 1191 | list_move(&inode->i_wb_list, &bdi->wb.b_dirty); |
1194 | spin_unlock(&bdi->wb.list_lock); | 1192 | spin_unlock(&bdi->wb.list_lock); |
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 8702b732109a..73899c1c3449 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
@@ -913,7 +913,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, | |||
913 | (1 << FSCACHE_OP_WAITING) | | 913 | (1 << FSCACHE_OP_WAITING) | |
914 | (1 << FSCACHE_OP_UNUSE_COOKIE); | 914 | (1 << FSCACHE_OP_UNUSE_COOKIE); |
915 | 915 | ||
916 | ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); | 916 | ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM); |
917 | if (ret < 0) | 917 | if (ret < 0) |
918 | goto nomem_free; | 918 | goto nomem_free; |
919 | 919 | ||
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index e0fe703ee3d6..84434594e80e 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -930,7 +930,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) | |||
930 | fc->bdi.name = "fuse"; | 930 | fc->bdi.name = "fuse"; |
931 | fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | 931 | fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; |
932 | /* fuse does it's own writeback accounting */ | 932 | /* fuse does it's own writeback accounting */ |
933 | fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; | 933 | fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT; |
934 | 934 | ||
935 | err = bdi_init(&fc->bdi); | 935 | err = bdi_init(&fc->bdi); |
936 | if (err) | 936 | if (err) |
diff --git a/fs/hfsplus/Kconfig b/fs/hfsplus/Kconfig index a63371815aab..24bc20fd42f7 100644 --- a/fs/hfsplus/Kconfig +++ b/fs/hfsplus/Kconfig | |||
@@ -11,3 +11,21 @@ config HFSPLUS_FS | |||
11 | MacOS 8. It includes all Mac specific filesystem data such as | 11 | MacOS 8. It includes all Mac specific filesystem data such as |
12 | data forks and creator codes, but it also has several UNIX | 12 | data forks and creator codes, but it also has several UNIX |
13 | style features such as file ownership and permissions. | 13 | style features such as file ownership and permissions. |
14 | |||
15 | config HFSPLUS_FS_POSIX_ACL | ||
16 | bool "HFS+ POSIX Access Control Lists" | ||
17 | depends on HFSPLUS_FS | ||
18 | select FS_POSIX_ACL | ||
19 | help | ||
20 | POSIX Access Control Lists (ACLs) support permissions for users and | ||
21 | groups beyond the owner/group/world scheme. | ||
22 | |||
23 | To learn more about Access Control Lists, visit the POSIX ACLs for | ||
24 | Linux website <http://acl.bestbits.at/>. | ||
25 | |||
26 | It needs to understand that POSIX ACLs are treated only under | ||
27 | Linux. POSIX ACLs doesn't mean something under Mac OS X. | ||
28 | Mac OS X beginning with version 10.4 ("Tiger") support NFSv4 ACLs, | ||
29 | which are part of the NFSv4 standard. | ||
30 | |||
31 | If you don't know what Access Control Lists are, say N | ||
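With CONFIG_HFSPLUS_FS_POSIX_ACL enabled, the usual POSIX.1e userspace interfaces apply to files on an HFS+ mount. A hedged example using the libacl draft API (link with -lacl; the target path is an assumption, not from the patch):

```c
#include <stdio.h>
#include <sys/types.h>
#include <sys/acl.h>

int main(int argc, char **argv)
{
	acl_t acl;
	char *text;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path-on-hfsplus>\n", argv[0]);
		return 1;
	}
	/* read and print the access ACL stored via the new xattr handlers */
	acl = acl_get_file(argv[1], ACL_TYPE_ACCESS);
	if (!acl) {
		perror("acl_get_file");
		return 1;
	}
	text = acl_to_text(acl, NULL);
	if (text)
		printf("%s", text);
	acl_free(text);
	acl_free(acl);
	return 0;
}
```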
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile index 09d278bb7b91..683fca2e5e65 100644 --- a/fs/hfsplus/Makefile +++ b/fs/hfsplus/Makefile | |||
@@ -7,3 +7,5 @@ obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o | |||
7 | hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \ | 7 | hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \ |
8 | bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \ | 8 | bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \ |
9 | attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o | 9 | attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o |
10 | |||
11 | hfsplus-$(CONFIG_HFSPLUS_FS_POSIX_ACL) += posix_acl.o | ||
diff --git a/fs/hfsplus/acl.h b/fs/hfsplus/acl.h new file mode 100644 index 000000000000..07c0d4947527 --- /dev/null +++ b/fs/hfsplus/acl.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * linux/fs/hfsplus/acl.h | ||
3 | * | ||
4 | * Vyacheslav Dubeyko <slava@dubeyko.com> | ||
5 | * | ||
6 | * Handler for Posix Access Control Lists (ACLs) support. | ||
7 | */ | ||
8 | |||
9 | #include <linux/posix_acl_xattr.h> | ||
10 | |||
11 | #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL | ||
12 | |||
13 | /* posix_acl.c */ | ||
14 | struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type); | ||
15 | extern int hfsplus_posix_acl_chmod(struct inode *); | ||
16 | extern int hfsplus_init_posix_acl(struct inode *, struct inode *); | ||
17 | |||
18 | #else /* CONFIG_HFSPLUS_FS_POSIX_ACL */ | ||
19 | #define hfsplus_get_posix_acl NULL | ||
20 | |||
21 | static inline int hfsplus_posix_acl_chmod(struct inode *inode) | ||
22 | { | ||
23 | return 0; | ||
24 | } | ||
25 | |||
26 | static inline int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir) | ||
27 | { | ||
28 | return 0; | ||
29 | } | ||
30 | #endif /* CONFIG_HFSPLUS_FS_POSIX_ACL */ | ||
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index d8ce4bd17fc5..4a4fea002673 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include "hfsplus_fs.h" | 16 | #include "hfsplus_fs.h" |
17 | #include "hfsplus_raw.h" | 17 | #include "hfsplus_raw.h" |
18 | #include "xattr.h" | 18 | #include "xattr.h" |
19 | #include "acl.h" | ||
19 | 20 | ||
20 | static inline void hfsplus_instantiate(struct dentry *dentry, | 21 | static inline void hfsplus_instantiate(struct dentry *dentry, |
21 | struct inode *inode, u32 cnid) | 22 | struct inode *inode, u32 cnid) |
@@ -529,6 +530,9 @@ const struct inode_operations hfsplus_dir_inode_operations = { | |||
529 | .getxattr = generic_getxattr, | 530 | .getxattr = generic_getxattr, |
530 | .listxattr = hfsplus_listxattr, | 531 | .listxattr = hfsplus_listxattr, |
531 | .removexattr = hfsplus_removexattr, | 532 | .removexattr = hfsplus_removexattr, |
533 | #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL | ||
534 | .get_acl = hfsplus_get_posix_acl, | ||
535 | #endif | ||
532 | }; | 536 | }; |
533 | 537 | ||
534 | const struct file_operations hfsplus_dir_operations = { | 538 | const struct file_operations hfsplus_dir_operations = { |
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index ede79317cfb8..2b9cd01696e2 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
@@ -30,6 +30,7 @@ | |||
30 | #define DBG_EXTENT 0x00000020 | 30 | #define DBG_EXTENT 0x00000020 |
31 | #define DBG_BITMAP 0x00000040 | 31 | #define DBG_BITMAP 0x00000040 |
32 | #define DBG_ATTR_MOD 0x00000080 | 32 | #define DBG_ATTR_MOD 0x00000080 |
33 | #define DBG_ACL_MOD 0x00000100 | ||
33 | 34 | ||
34 | #if 0 | 35 | #if 0 |
35 | #define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD) | 36 | #define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD) |
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index f833d35630ab..4d2edaea891c 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include "hfsplus_fs.h" | 19 | #include "hfsplus_fs.h" |
20 | #include "hfsplus_raw.h" | 20 | #include "hfsplus_raw.h" |
21 | #include "xattr.h" | 21 | #include "xattr.h" |
22 | #include "acl.h" | ||
22 | 23 | ||
23 | static int hfsplus_readpage(struct file *file, struct page *page) | 24 | static int hfsplus_readpage(struct file *file, struct page *page) |
24 | { | 25 | { |
@@ -316,6 +317,13 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) | |||
316 | 317 | ||
317 | setattr_copy(inode, attr); | 318 | setattr_copy(inode, attr); |
318 | mark_inode_dirty(inode); | 319 | mark_inode_dirty(inode); |
320 | |||
321 | if (attr->ia_valid & ATTR_MODE) { | ||
322 | error = hfsplus_posix_acl_chmod(inode); | ||
323 | if (unlikely(error)) | ||
324 | return error; | ||
325 | } | ||
326 | |||
319 | return 0; | 327 | return 0; |
320 | } | 328 | } |
321 | 329 | ||
@@ -383,6 +391,9 @@ static const struct inode_operations hfsplus_file_inode_operations = { | |||
383 | .getxattr = generic_getxattr, | 391 | .getxattr = generic_getxattr, |
384 | .listxattr = hfsplus_listxattr, | 392 | .listxattr = hfsplus_listxattr, |
385 | .removexattr = hfsplus_removexattr, | 393 | .removexattr = hfsplus_removexattr, |
394 | #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL | ||
395 | .get_acl = hfsplus_get_posix_acl, | ||
396 | #endif | ||
386 | }; | 397 | }; |
387 | 398 | ||
388 | static const struct file_operations hfsplus_file_operations = { | 399 | static const struct file_operations hfsplus_file_operations = { |
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c new file mode 100644 index 000000000000..b609cc14c72e --- /dev/null +++ b/fs/hfsplus/posix_acl.c | |||
@@ -0,0 +1,274 @@ | |||
1 | /* | ||
2 | * linux/fs/hfsplus/posix_acl.c | ||
3 | * | ||
4 | * Vyacheslav Dubeyko <slava@dubeyko.com> | ||
5 | * | ||
6 | * Handler for Posix Access Control Lists (ACLs) support. | ||
7 | */ | ||
8 | |||
9 | #include "hfsplus_fs.h" | ||
10 | #include "xattr.h" | ||
11 | #include "acl.h" | ||
12 | |||
13 | struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type) | ||
14 | { | ||
15 | struct posix_acl *acl; | ||
16 | char *xattr_name; | ||
17 | char *value = NULL; | ||
18 | ssize_t size; | ||
19 | |||
20 | acl = get_cached_acl(inode, type); | ||
21 | if (acl != ACL_NOT_CACHED) | ||
22 | return acl; | ||
23 | |||
24 | switch (type) { | ||
25 | case ACL_TYPE_ACCESS: | ||
26 | xattr_name = POSIX_ACL_XATTR_ACCESS; | ||
27 | break; | ||
28 | case ACL_TYPE_DEFAULT: | ||
29 | xattr_name = POSIX_ACL_XATTR_DEFAULT; | ||
30 | break; | ||
31 | default: | ||
32 | return ERR_PTR(-EINVAL); | ||
33 | } | ||
34 | |||
35 | size = __hfsplus_getxattr(inode, xattr_name, NULL, 0); | ||
36 | |||
37 | if (size > 0) { | ||
38 | value = (char *)hfsplus_alloc_attr_entry(); | ||
39 | if (unlikely(!value)) | ||
40 | return ERR_PTR(-ENOMEM); | ||
41 | size = __hfsplus_getxattr(inode, xattr_name, value, size); | ||
42 | } | ||
43 | |||
44 | if (size > 0) | ||
45 | acl = posix_acl_from_xattr(&init_user_ns, value, size); | ||
46 | else if (size == -ENODATA) | ||
47 | acl = NULL; | ||
48 | else | ||
49 | acl = ERR_PTR(size); | ||
50 | |||
51 | hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value); | ||
52 | |||
53 | if (!IS_ERR(acl)) | ||
54 | set_cached_acl(inode, type, acl); | ||
55 | |||
56 | return acl; | ||
57 | } | ||
58 | |||
59 | static int hfsplus_set_posix_acl(struct inode *inode, | ||
60 | int type, | ||
61 | struct posix_acl *acl) | ||
62 | { | ||
63 | int err; | ||
64 | char *xattr_name; | ||
65 | size_t size = 0; | ||
66 | char *value = NULL; | ||
67 | |||
68 | if (S_ISLNK(inode->i_mode)) | ||
69 | return -EOPNOTSUPP; | ||
70 | |||
71 | switch (type) { | ||
72 | case ACL_TYPE_ACCESS: | ||
73 | xattr_name = POSIX_ACL_XATTR_ACCESS; | ||
74 | if (acl) { | ||
75 | err = posix_acl_equiv_mode(acl, &inode->i_mode); | ||
76 | if (err < 0) | ||
77 | return err; | ||
78 | } | ||
79 | err = 0; | ||
80 | break; | ||
81 | |||
82 | case ACL_TYPE_DEFAULT: | ||
83 | xattr_name = POSIX_ACL_XATTR_DEFAULT; | ||
84 | if (!S_ISDIR(inode->i_mode)) | ||
85 | return acl ? -EACCES : 0; | ||
86 | break; | ||
87 | |||
88 | default: | ||
89 | return -EINVAL; | ||
90 | } | ||
91 | |||
92 | if (acl) { | ||
93 | size = posix_acl_xattr_size(acl->a_count); | ||
94 | if (unlikely(size > HFSPLUS_MAX_INLINE_DATA_SIZE)) | ||
95 | return -ENOMEM; | ||
96 | value = (char *)hfsplus_alloc_attr_entry(); | ||
97 | if (unlikely(!value)) | ||
98 | return -ENOMEM; | ||
99 | err = posix_acl_to_xattr(&init_user_ns, acl, value, size); | ||
100 | if (unlikely(err < 0)) | ||
101 | goto end_set_acl; | ||
102 | } | ||
103 | |||
104 | err = __hfsplus_setxattr(inode, xattr_name, value, size, 0); | ||
105 | |||
106 | end_set_acl: | ||
107 | hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value); | ||
108 | |||
109 | if (!err) | ||
110 | set_cached_acl(inode, type, acl); | ||
111 | |||
112 | return err; | ||
113 | } | ||
114 | |||
115 | int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir) | ||
116 | { | ||
117 | int err = 0; | ||
118 | struct posix_acl *acl = NULL; | ||
119 | |||
120 | hfs_dbg(ACL_MOD, | ||
121 | "[%s]: ino %lu, dir->ino %lu\n", | ||
122 | __func__, inode->i_ino, dir->i_ino); | ||
123 | |||
124 | if (S_ISLNK(inode->i_mode)) | ||
125 | return 0; | ||
126 | |||
127 | acl = hfsplus_get_posix_acl(dir, ACL_TYPE_DEFAULT); | ||
128 | if (IS_ERR(acl)) | ||
129 | return PTR_ERR(acl); | ||
130 | |||
131 | if (acl) { | ||
132 | if (S_ISDIR(inode->i_mode)) { | ||
133 | err = hfsplus_set_posix_acl(inode, | ||
134 | ACL_TYPE_DEFAULT, | ||
135 | acl); | ||
136 | if (unlikely(err)) | ||
137 | goto init_acl_cleanup; | ||
138 | } | ||
139 | |||
140 | err = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode); | ||
141 | if (unlikely(err < 0)) | ||
142 | return err; | ||
143 | |||
144 | if (err > 0) | ||
145 | err = hfsplus_set_posix_acl(inode, | ||
146 | ACL_TYPE_ACCESS, | ||
147 | acl); | ||
148 | } else | ||
149 | inode->i_mode &= ~current_umask(); | ||
150 | |||
151 | init_acl_cleanup: | ||
152 | posix_acl_release(acl); | ||
153 | return err; | ||
154 | } | ||
155 | |||
156 | int hfsplus_posix_acl_chmod(struct inode *inode) | ||
157 | { | ||
158 | int err; | ||
159 | struct posix_acl *acl; | ||
160 | |||
161 | hfs_dbg(ACL_MOD, "[%s]: ino %lu\n", __func__, inode->i_ino); | ||
162 | |||
163 | if (S_ISLNK(inode->i_mode)) | ||
164 | return -EOPNOTSUPP; | ||
165 | |||
166 | acl = hfsplus_get_posix_acl(inode, ACL_TYPE_ACCESS); | ||
167 | if (IS_ERR(acl) || !acl) | ||
168 | return PTR_ERR(acl); | ||
169 | |||
170 | err = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); | ||
171 | if (unlikely(err)) | ||
172 | return err; | ||
173 | |||
174 | err = hfsplus_set_posix_acl(inode, ACL_TYPE_ACCESS, acl); | ||
175 | posix_acl_release(acl); | ||
176 | return err; | ||
177 | } | ||
178 | |||
179 | static int hfsplus_xattr_get_posix_acl(struct dentry *dentry, | ||
180 | const char *name, | ||
181 | void *buffer, | ||
182 | size_t size, | ||
183 | int type) | ||
184 | { | ||
185 | int err = 0; | ||
186 | struct posix_acl *acl; | ||
187 | |||
188 | hfs_dbg(ACL_MOD, | ||
189 | "[%s]: ino %lu, buffer %p, size %zu, type %#x\n", | ||
190 | __func__, dentry->d_inode->i_ino, buffer, size, type); | ||
191 | |||
192 | if (strcmp(name, "") != 0) | ||
193 | return -EINVAL; | ||
194 | |||
195 | acl = hfsplus_get_posix_acl(dentry->d_inode, type); | ||
196 | if (IS_ERR(acl)) | ||
197 | return PTR_ERR(acl); | ||
198 | if (acl == NULL) | ||
199 | return -ENODATA; | ||
200 | |||
201 | err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); | ||
202 | posix_acl_release(acl); | ||
203 | |||
204 | return err; | ||
205 | } | ||
206 | |||
207 | static int hfsplus_xattr_set_posix_acl(struct dentry *dentry, | ||
208 | const char *name, | ||
209 | const void *value, | ||
210 | size_t size, | ||
211 | int flags, | ||
212 | int type) | ||
213 | { | ||
214 | int err = 0; | ||
215 | struct inode *inode = dentry->d_inode; | ||
216 | struct posix_acl *acl = NULL; | ||
217 | |||
218 | hfs_dbg(ACL_MOD, | ||
219 | "[%s]: ino %lu, value %p, size %zu, flags %#x, type %#x\n", | ||
220 | __func__, inode->i_ino, value, size, flags, type); | ||
221 | |||
222 | if (strcmp(name, "") != 0) | ||
223 | return -EINVAL; | ||
224 | |||
225 | if (!inode_owner_or_capable(inode)) | ||
226 | return -EPERM; | ||
227 | |||
228 | if (value) { | ||
229 | acl = posix_acl_from_xattr(&init_user_ns, value, size); | ||
230 | if (IS_ERR(acl)) | ||
231 | return PTR_ERR(acl); | ||
232 | else if (acl) { | ||
233 | err = posix_acl_valid(acl); | ||
234 | if (err) | ||
235 | goto end_xattr_set_acl; | ||
236 | } | ||
237 | } | ||
238 | |||
239 | err = hfsplus_set_posix_acl(inode, type, acl); | ||
240 | |||
241 | end_xattr_set_acl: | ||
242 | posix_acl_release(acl); | ||
243 | return err; | ||
244 | } | ||
245 | |||
246 | static size_t hfsplus_xattr_list_posix_acl(struct dentry *dentry, | ||
247 | char *list, | ||
248 | size_t list_size, | ||
249 | const char *name, | ||
250 | size_t name_len, | ||
251 | int type) | ||
252 | { | ||
253 | /* | ||
254 | * This method is not used. | ||
255 | * It is used hfsplus_listxattr() instead of generic_listxattr(). | ||
256 | */ | ||
257 | return -EOPNOTSUPP; | ||
258 | } | ||
259 | |||
260 | const struct xattr_handler hfsplus_xattr_acl_access_handler = { | ||
261 | .prefix = POSIX_ACL_XATTR_ACCESS, | ||
262 | .flags = ACL_TYPE_ACCESS, | ||
263 | .list = hfsplus_xattr_list_posix_acl, | ||
264 | .get = hfsplus_xattr_get_posix_acl, | ||
265 | .set = hfsplus_xattr_set_posix_acl, | ||
266 | }; | ||
267 | |||
268 | const struct xattr_handler hfsplus_xattr_acl_default_handler = { | ||
269 | .prefix = POSIX_ACL_XATTR_DEFAULT, | ||
270 | .flags = ACL_TYPE_DEFAULT, | ||
271 | .list = hfsplus_xattr_list_posix_acl, | ||
272 | .get = hfsplus_xattr_get_posix_acl, | ||
273 | .set = hfsplus_xattr_set_posix_acl, | ||
274 | }; | ||
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index f66346155df5..bd8471fb9a6a 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c | |||
@@ -8,11 +8,16 @@ | |||
8 | 8 | ||
9 | #include "hfsplus_fs.h" | 9 | #include "hfsplus_fs.h" |
10 | #include "xattr.h" | 10 | #include "xattr.h" |
11 | #include "acl.h" | ||
11 | 12 | ||
12 | const struct xattr_handler *hfsplus_xattr_handlers[] = { | 13 | const struct xattr_handler *hfsplus_xattr_handlers[] = { |
13 | &hfsplus_xattr_osx_handler, | 14 | &hfsplus_xattr_osx_handler, |
14 | &hfsplus_xattr_user_handler, | 15 | &hfsplus_xattr_user_handler, |
15 | &hfsplus_xattr_trusted_handler, | 16 | &hfsplus_xattr_trusted_handler, |
17 | #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL | ||
18 | &hfsplus_xattr_acl_access_handler, | ||
19 | &hfsplus_xattr_acl_default_handler, | ||
20 | #endif | ||
16 | &hfsplus_xattr_security_handler, | 21 | &hfsplus_xattr_security_handler, |
17 | NULL | 22 | NULL |
18 | }; | 23 | }; |
@@ -46,11 +51,58 @@ static inline int is_known_namespace(const char *name) | |||
46 | return true; | 51 | return true; |
47 | } | 52 | } |
48 | 53 | ||
54 | static int can_set_system_xattr(struct inode *inode, const char *name, | ||
55 | const void *value, size_t size) | ||
56 | { | ||
57 | #ifdef CONFIG_HFSPLUS_FS_POSIX_ACL | ||
58 | struct posix_acl *acl; | ||
59 | int err; | ||
60 | |||
61 | if (!inode_owner_or_capable(inode)) | ||
62 | return -EPERM; | ||
63 | |||
64 | /* | ||
65 | * POSIX_ACL_XATTR_ACCESS is tied to i_mode | ||
66 | */ | ||
67 | if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) { | ||
68 | acl = posix_acl_from_xattr(&init_user_ns, value, size); | ||
69 | if (IS_ERR(acl)) | ||
70 | return PTR_ERR(acl); | ||
71 | if (acl) { | ||
72 | err = posix_acl_equiv_mode(acl, &inode->i_mode); | ||
73 | posix_acl_release(acl); | ||
74 | if (err < 0) | ||
75 | return err; | ||
76 | mark_inode_dirty(inode); | ||
77 | } | ||
78 | /* | ||
79 | * We're changing the ACL. Get rid of the cached one | ||
80 | */ | ||
81 | forget_cached_acl(inode, ACL_TYPE_ACCESS); | ||
82 | |||
83 | return 0; | ||
84 | } else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) { | ||
85 | acl = posix_acl_from_xattr(&init_user_ns, value, size); | ||
86 | if (IS_ERR(acl)) | ||
87 | return PTR_ERR(acl); | ||
88 | posix_acl_release(acl); | ||
89 | |||
90 | /* | ||
91 | * We're changing the default ACL. Get rid of the cached one | ||
92 | */ | ||
93 | forget_cached_acl(inode, ACL_TYPE_DEFAULT); | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | #endif /* CONFIG_HFSPLUS_FS_POSIX_ACL */ | ||
98 | return -EOPNOTSUPP; | ||
99 | } | ||
100 | |||
49 | static int can_set_xattr(struct inode *inode, const char *name, | 101 | static int can_set_xattr(struct inode *inode, const char *name, |
50 | const void *value, size_t value_len) | 102 | const void *value, size_t value_len) |
51 | { | 103 | { |
52 | if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) | 104 | if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) |
53 | return -EOPNOTSUPP; /* TODO: implement ACL support */ | 105 | return can_set_system_xattr(inode, name, value, value_len); |
54 | 106 | ||
55 | if (!strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN)) { | 107 | if (!strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN)) { |
56 | /* | 108 | /* |
@@ -253,11 +305,10 @@ static int copy_name(char *buffer, const char *xattr_name, int name_len) | |||
253 | return len; | 305 | return len; |
254 | } | 306 | } |
255 | 307 | ||
256 | static ssize_t hfsplus_getxattr_finder_info(struct dentry *dentry, | 308 | static ssize_t hfsplus_getxattr_finder_info(struct inode *inode, |
257 | void *value, size_t size) | 309 | void *value, size_t size) |
258 | { | 310 | { |
259 | ssize_t res = 0; | 311 | ssize_t res = 0; |
260 | struct inode *inode = dentry->d_inode; | ||
261 | struct hfs_find_data fd; | 312 | struct hfs_find_data fd; |
262 | u16 entry_type; | 313 | u16 entry_type; |
263 | u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo); | 314 | u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo); |
@@ -304,10 +355,9 @@ end_getxattr_finder_info: | |||
304 | return res; | 355 | return res; |
305 | } | 356 | } |
306 | 357 | ||
307 | ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, | 358 | ssize_t __hfsplus_getxattr(struct inode *inode, const char *name, |
308 | void *value, size_t size) | 359 | void *value, size_t size) |
309 | { | 360 | { |
310 | struct inode *inode = dentry->d_inode; | ||
311 | struct hfs_find_data fd; | 361 | struct hfs_find_data fd; |
312 | hfsplus_attr_entry *entry; | 362 | hfsplus_attr_entry *entry; |
313 | __be32 xattr_record_type; | 363 | __be32 xattr_record_type; |
@@ -333,7 +383,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, | |||
333 | } | 383 | } |
334 | 384 | ||
335 | if (!strcmp_xattr_finder_info(name)) | 385 | if (!strcmp_xattr_finder_info(name)) |
336 | return hfsplus_getxattr_finder_info(dentry, value, size); | 386 | return hfsplus_getxattr_finder_info(inode, value, size); |
337 | 387 | ||
338 | if (!HFSPLUS_SB(inode->i_sb)->attr_tree) | 388 | if (!HFSPLUS_SB(inode->i_sb)->attr_tree) |
339 | return -EOPNOTSUPP; | 389 | return -EOPNOTSUPP; |
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h index 847b695b984d..841b5698c0fc 100644 --- a/fs/hfsplus/xattr.h +++ b/fs/hfsplus/xattr.h | |||
@@ -14,8 +14,8 @@ | |||
14 | extern const struct xattr_handler hfsplus_xattr_osx_handler; | 14 | extern const struct xattr_handler hfsplus_xattr_osx_handler; |
15 | extern const struct xattr_handler hfsplus_xattr_user_handler; | 15 | extern const struct xattr_handler hfsplus_xattr_user_handler; |
16 | extern const struct xattr_handler hfsplus_xattr_trusted_handler; | 16 | extern const struct xattr_handler hfsplus_xattr_trusted_handler; |
17 | /*extern const struct xattr_handler hfsplus_xattr_acl_access_handler;*/ | 17 | extern const struct xattr_handler hfsplus_xattr_acl_access_handler; |
18 | /*extern const struct xattr_handler hfsplus_xattr_acl_default_handler;*/ | 18 | extern const struct xattr_handler hfsplus_xattr_acl_default_handler; |
19 | extern const struct xattr_handler hfsplus_xattr_security_handler; | 19 | extern const struct xattr_handler hfsplus_xattr_security_handler; |
20 | 20 | ||
21 | extern const struct xattr_handler *hfsplus_xattr_handlers[]; | 21 | extern const struct xattr_handler *hfsplus_xattr_handlers[]; |
@@ -29,9 +29,17 @@ static inline int hfsplus_setxattr(struct dentry *dentry, const char *name, | |||
29 | return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags); | 29 | return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags); |
30 | } | 30 | } |
31 | 31 | ||
32 | ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, | 32 | ssize_t __hfsplus_getxattr(struct inode *inode, const char *name, |
33 | void *value, size_t size); | 33 | void *value, size_t size); |
34 | 34 | ||
35 | static inline ssize_t hfsplus_getxattr(struct dentry *dentry, | ||
36 | const char *name, | ||
37 | void *value, | ||
38 | size_t size) | ||
39 | { | ||
40 | return __hfsplus_getxattr(dentry->d_inode, name, value, size); | ||
41 | } | ||
42 | |||
35 | ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size); | 43 | ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size); |
36 | 44 | ||
37 | int hfsplus_removexattr(struct dentry *dentry, const char *name); | 45 | int hfsplus_removexattr(struct dentry *dentry, const char *name); |
@@ -39,22 +47,7 @@ int hfsplus_removexattr(struct dentry *dentry, const char *name); | |||
39 | int hfsplus_init_security(struct inode *inode, struct inode *dir, | 47 | int hfsplus_init_security(struct inode *inode, struct inode *dir, |
40 | const struct qstr *qstr); | 48 | const struct qstr *qstr); |
41 | 49 | ||
42 | static inline int hfsplus_init_acl(struct inode *inode, struct inode *dir) | 50 | int hfsplus_init_inode_security(struct inode *inode, struct inode *dir, |
43 | { | 51 | const struct qstr *qstr); |
44 | /*TODO: implement*/ | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | static inline int hfsplus_init_inode_security(struct inode *inode, | ||
49 | struct inode *dir, | ||
50 | const struct qstr *qstr) | ||
51 | { | ||
52 | int err; | ||
53 | |||
54 | err = hfsplus_init_acl(inode, dir); | ||
55 | if (!err) | ||
56 | err = hfsplus_init_security(inode, dir, qstr); | ||
57 | return err; | ||
58 | } | ||
59 | 52 | ||
60 | #endif | 53 | #endif |
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c index 83b842f113c5..00722765ea79 100644 --- a/fs/hfsplus/xattr_security.c +++ b/fs/hfsplus/xattr_security.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/security.h> | 9 | #include <linux/security.h> |
10 | #include "hfsplus_fs.h" | 10 | #include "hfsplus_fs.h" |
11 | #include "xattr.h" | 11 | #include "xattr.h" |
12 | #include "acl.h" | ||
12 | 13 | ||
13 | static int hfsplus_security_getxattr(struct dentry *dentry, const char *name, | 14 | static int hfsplus_security_getxattr(struct dentry *dentry, const char *name, |
14 | void *buffer, size_t size, int type) | 15 | void *buffer, size_t size, int type) |
@@ -96,6 +97,18 @@ int hfsplus_init_security(struct inode *inode, struct inode *dir, | |||
96 | &hfsplus_initxattrs, NULL); | 97 | &hfsplus_initxattrs, NULL); |
97 | } | 98 | } |
98 | 99 | ||
100 | int hfsplus_init_inode_security(struct inode *inode, | ||
101 | struct inode *dir, | ||
102 | const struct qstr *qstr) | ||
103 | { | ||
104 | int err; | ||
105 | |||
106 | err = hfsplus_init_posix_acl(inode, dir); | ||
107 | if (!err) | ||
108 | err = hfsplus_init_security(inode, dir, qstr); | ||
109 | return err; | ||
110 | } | ||
111 | |||
99 | const struct xattr_handler hfsplus_xattr_security_handler = { | 112 | const struct xattr_handler hfsplus_xattr_security_handler = { |
100 | .prefix = XATTR_SECURITY_PREFIX, | 113 | .prefix = XATTR_SECURITY_PREFIX, |
101 | .list = hfsplus_security_listxattr, | 114 | .list = hfsplus_security_listxattr, |
diff --git a/fs/namespace.c b/fs/namespace.c index 25845d1b300b..da5c49483430 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/security.h> | 17 | #include <linux/security.h> |
18 | #include <linux/idr.h> | 18 | #include <linux/idr.h> |
19 | #include <linux/acct.h> /* acct_auto_close_mnt */ | 19 | #include <linux/acct.h> /* acct_auto_close_mnt */ |
20 | #include <linux/ramfs.h> /* init_rootfs */ | 20 | #include <linux/init.h> /* init_rootfs */ |
21 | #include <linux/fs_struct.h> /* get_fs_root et.al. */ | 21 | #include <linux/fs_struct.h> /* get_fs_root et.al. */ |
22 | #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ | 22 | #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 8a404576fb26..b4f788e0ca31 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c | |||
@@ -51,10 +51,6 @@ static struct posix_acl *ocfs2_acl_from_xattr(const void *value, size_t size) | |||
51 | return ERR_PTR(-EINVAL); | 51 | return ERR_PTR(-EINVAL); |
52 | 52 | ||
53 | count = size / sizeof(struct posix_acl_entry); | 53 | count = size / sizeof(struct posix_acl_entry); |
54 | if (count < 0) | ||
55 | return ERR_PTR(-EINVAL); | ||
56 | if (count == 0) | ||
57 | return NULL; | ||
58 | 54 | ||
59 | acl = posix_acl_alloc(count, GFP_NOFS); | 55 | acl = posix_acl_alloc(count, GFP_NOFS); |
60 | if (!acl) | 56 | if (!acl) |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 94417a85ce6e..f37d3c0e2053 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -2044,7 +2044,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping, | |||
2044 | 2044 | ||
2045 | out_write_size: | 2045 | out_write_size: |
2046 | pos += copied; | 2046 | pos += copied; |
2047 | if (pos > inode->i_size) { | 2047 | if (pos > i_size_read(inode)) { |
2048 | i_size_write(inode, pos); | 2048 | i_size_write(inode, pos); |
2049 | mark_inode_dirty(inode); | 2049 | mark_inode_dirty(inode); |
2050 | } | 2050 | } |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 5c1c864e81cc..363f0dcc924f 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -628,11 +628,9 @@ static void o2hb_fire_callbacks(struct o2hb_callback *hbcall, | |||
628 | struct o2nm_node *node, | 628 | struct o2nm_node *node, |
629 | int idx) | 629 | int idx) |
630 | { | 630 | { |
631 | struct list_head *iter; | ||
632 | struct o2hb_callback_func *f; | 631 | struct o2hb_callback_func *f; |
633 | 632 | ||
634 | list_for_each(iter, &hbcall->list) { | 633 | list_for_each_entry(f, &hbcall->list, hc_item) { |
635 | f = list_entry(iter, struct o2hb_callback_func, hc_item); | ||
636 | mlog(ML_HEARTBEAT, "calling funcs %p\n", f); | 634 | mlog(ML_HEARTBEAT, "calling funcs %p\n", f); |
637 | (f->hc_func)(node, idx, f->hc_data); | 635 | (f->hc_func)(node, idx, f->hc_data); |
638 | } | 636 | } |
@@ -641,16 +639,9 @@ static void o2hb_fire_callbacks(struct o2hb_callback *hbcall, | |||
641 | /* Will run the list in order until we process the passed event */ | 639 | /* Will run the list in order until we process the passed event */ |
642 | static void o2hb_run_event_list(struct o2hb_node_event *queued_event) | 640 | static void o2hb_run_event_list(struct o2hb_node_event *queued_event) |
643 | { | 641 | { |
644 | int empty; | ||
645 | struct o2hb_callback *hbcall; | 642 | struct o2hb_callback *hbcall; |
646 | struct o2hb_node_event *event; | 643 | struct o2hb_node_event *event; |
647 | 644 | ||
648 | spin_lock(&o2hb_live_lock); | ||
649 | empty = list_empty(&queued_event->hn_item); | ||
650 | spin_unlock(&o2hb_live_lock); | ||
651 | if (empty) | ||
652 | return; | ||
653 | |||
654 | /* Holding callback sem assures we don't alter the callback | 645 | /* Holding callback sem assures we don't alter the callback |
655 | * lists when doing this, and serializes ourselves with other | 646 | * lists when doing this, and serializes ourselves with other |
656 | * processes wanting callbacks. */ | 647 | * processes wanting callbacks. */ |
@@ -709,6 +700,7 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot) | |||
709 | struct o2hb_node_event event = | 700 | struct o2hb_node_event event = |
710 | { .hn_item = LIST_HEAD_INIT(event.hn_item), }; | 701 | { .hn_item = LIST_HEAD_INIT(event.hn_item), }; |
711 | struct o2nm_node *node; | 702 | struct o2nm_node *node; |
703 | int queued = 0; | ||
712 | 704 | ||
713 | node = o2nm_get_node_by_num(slot->ds_node_num); | 705 | node = o2nm_get_node_by_num(slot->ds_node_num); |
714 | if (!node) | 706 | if (!node) |
@@ -726,11 +718,13 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot) | |||
726 | 718 | ||
727 | o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node, | 719 | o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node, |
728 | slot->ds_node_num); | 720 | slot->ds_node_num); |
721 | queued = 1; | ||
729 | } | 722 | } |
730 | } | 723 | } |
731 | spin_unlock(&o2hb_live_lock); | 724 | spin_unlock(&o2hb_live_lock); |
732 | 725 | ||
733 | o2hb_run_event_list(&event); | 726 | if (queued) |
727 | o2hb_run_event_list(&event); | ||
734 | 728 | ||
735 | o2nm_node_put(node); | 729 | o2nm_node_put(node); |
736 | } | 730 | } |
@@ -790,6 +784,7 @@ static int o2hb_check_slot(struct o2hb_region *reg, | |||
790 | unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS; | 784 | unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS; |
791 | unsigned int slot_dead_ms; | 785 | unsigned int slot_dead_ms; |
792 | int tmp; | 786 | int tmp; |
787 | int queued = 0; | ||
793 | 788 | ||
794 | memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes); | 789 | memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes); |
795 | 790 | ||
@@ -883,6 +878,7 @@ fire_callbacks: | |||
883 | slot->ds_node_num); | 878 | slot->ds_node_num); |
884 | 879 | ||
885 | changed = 1; | 880 | changed = 1; |
881 | queued = 1; | ||
886 | } | 882 | } |
887 | 883 | ||
888 | list_add_tail(&slot->ds_live_item, | 884 | list_add_tail(&slot->ds_live_item, |
@@ -934,6 +930,7 @@ fire_callbacks: | |||
934 | node, slot->ds_node_num); | 930 | node, slot->ds_node_num); |
935 | 931 | ||
936 | changed = 1; | 932 | changed = 1; |
933 | queued = 1; | ||
937 | } | 934 | } |
938 | 935 | ||
939 | /* We don't clear this because the node is still | 936 | /* We don't clear this because the node is still |
@@ -949,7 +946,8 @@ fire_callbacks: | |||
949 | out: | 946 | out: |
950 | spin_unlock(&o2hb_live_lock); | 947 | spin_unlock(&o2hb_live_lock); |
951 | 948 | ||
952 | o2hb_run_event_list(&event); | 949 | if (queued) |
950 | o2hb_run_event_list(&event); | ||
953 | 951 | ||
954 | if (node) | 952 | if (node) |
955 | o2nm_node_put(node); | 953 | o2nm_node_put(node); |
@@ -2516,8 +2514,7 @@ unlock: | |||
2516 | int o2hb_register_callback(const char *region_uuid, | 2514 | int o2hb_register_callback(const char *region_uuid, |
2517 | struct o2hb_callback_func *hc) | 2515 | struct o2hb_callback_func *hc) |
2518 | { | 2516 | { |
2519 | struct o2hb_callback_func *tmp; | 2517 | struct o2hb_callback_func *f; |
2520 | struct list_head *iter; | ||
2521 | struct o2hb_callback *hbcall; | 2518 | struct o2hb_callback *hbcall; |
2522 | int ret; | 2519 | int ret; |
2523 | 2520 | ||
@@ -2540,10 +2537,9 @@ int o2hb_register_callback(const char *region_uuid, | |||
2540 | 2537 | ||
2541 | down_write(&o2hb_callback_sem); | 2538 | down_write(&o2hb_callback_sem); |
2542 | 2539 | ||
2543 | list_for_each(iter, &hbcall->list) { | 2540 | list_for_each_entry(f, &hbcall->list, hc_item) { |
2544 | tmp = list_entry(iter, struct o2hb_callback_func, hc_item); | 2541 | if (hc->hc_priority < f->hc_priority) { |
2545 | if (hc->hc_priority < tmp->hc_priority) { | 2542 | list_add_tail(&hc->hc_item, &f->hc_item); |
2546 | list_add_tail(&hc->hc_item, iter); | ||
2547 | break; | 2543 | break; |
2548 | } | 2544 | } |
2549 | } | 2545 | } |
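Several OCFS2 hunks in this series (o2hb_fire_callbacks, o2hb_register_callback, dlmast, dlmcommon, dlmconvert) replace open-coded list_for_each()/list_entry() pairs with the typed list_for_each_entry() iterator. A self-contained userspace sketch of the two styles, with the kernel's list macros re-implemented minimally for illustration (not the kernel headers themselves):

```c
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
#define list_for_each_entry(pos, head, member) \
	for ((pos) = list_entry((head)->next, __typeof__(*(pos)), member); \
	     &(pos)->member != (head); \
	     (pos) = list_entry((pos)->member.next, __typeof__(*(pos)), member))

struct callback {
	int priority;
	struct list_head item;
};

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct callback a = { .priority = 1 }, b = { .priority = 2 };
	struct callback *f;
	struct list_head *iter;

	list_add_tail(&a.item, &head);
	list_add_tail(&b.item, &head);

	/* old style: walk raw nodes and convert to the container by hand */
	list_for_each(iter, &head) {
		f = list_entry(iter, struct callback, item);
		printf("old-style: priority %d\n", f->priority);
	}

	/* new style after these patches: typed iteration */
	list_for_each_entry(f, &head, item)
		printf("entry-style: priority %d\n", f->priority);

	return 0;
}
```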
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index d644dc611425..2cd2406b4140 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -543,8 +543,9 @@ static void o2net_set_nn_state(struct o2net_node *nn, | |||
543 | } | 543 | } |
544 | 544 | ||
545 | if (was_valid && !valid) { | 545 | if (was_valid && !valid) { |
546 | printk(KERN_NOTICE "o2net: No longer connected to " | 546 | if (old_sc) |
547 | SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); | 547 | printk(KERN_NOTICE "o2net: No longer connected to " |
548 | SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); | ||
548 | o2net_complete_nodes_nsw(nn); | 549 | o2net_complete_nodes_nsw(nn); |
549 | } | 550 | } |
550 | 551 | ||
@@ -765,32 +766,32 @@ static struct o2net_msg_handler * | |||
765 | o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p, | 766 | o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p, |
766 | struct rb_node **ret_parent) | 767 | struct rb_node **ret_parent) |
767 | { | 768 | { |
768 | struct rb_node **p = &o2net_handler_tree.rb_node; | 769 | struct rb_node **p = &o2net_handler_tree.rb_node; |
769 | struct rb_node *parent = NULL; | 770 | struct rb_node *parent = NULL; |
770 | struct o2net_msg_handler *nmh, *ret = NULL; | 771 | struct o2net_msg_handler *nmh, *ret = NULL; |
771 | int cmp; | 772 | int cmp; |
772 | 773 | ||
773 | while (*p) { | 774 | while (*p) { |
774 | parent = *p; | 775 | parent = *p; |
775 | nmh = rb_entry(parent, struct o2net_msg_handler, nh_node); | 776 | nmh = rb_entry(parent, struct o2net_msg_handler, nh_node); |
776 | cmp = o2net_handler_cmp(nmh, msg_type, key); | 777 | cmp = o2net_handler_cmp(nmh, msg_type, key); |
777 | 778 | ||
778 | if (cmp < 0) | 779 | if (cmp < 0) |
779 | p = &(*p)->rb_left; | 780 | p = &(*p)->rb_left; |
780 | else if (cmp > 0) | 781 | else if (cmp > 0) |
781 | p = &(*p)->rb_right; | 782 | p = &(*p)->rb_right; |
782 | else { | 783 | else { |
783 | ret = nmh; | 784 | ret = nmh; |
784 | break; | 785 | break; |
785 | } | 786 | } |
786 | } | 787 | } |
787 | 788 | ||
788 | if (ret_p != NULL) | 789 | if (ret_p != NULL) |
789 | *ret_p = p; | 790 | *ret_p = p; |
790 | if (ret_parent != NULL) | 791 | if (ret_parent != NULL) |
791 | *ret_parent = parent; | 792 | *ret_parent = parent; |
792 | 793 | ||
793 | return ret; | 794 | return ret; |
794 | } | 795 | } |
795 | 796 | ||
796 | static void o2net_handler_kref_release(struct kref *kref) | 797 | static void o2net_handler_kref_release(struct kref *kref) |
@@ -1695,13 +1696,12 @@ static void o2net_start_connect(struct work_struct *work) | |||
1695 | ret = 0; | 1696 | ret = 0; |
1696 | 1697 | ||
1697 | out: | 1698 | out: |
1698 | if (ret) { | 1699 | if (ret && sc) { |
1699 | printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT | 1700 | printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT |
1700 | " failed with errno %d\n", SC_NODEF_ARGS(sc), ret); | 1701 | " failed with errno %d\n", SC_NODEF_ARGS(sc), ret); |
1701 | /* 0 err so that another will be queued and attempted | 1702 | /* 0 err so that another will be queued and attempted |
1702 | * from set_nn_state */ | 1703 | * from set_nn_state */ |
1703 | if (sc) | 1704 | o2net_ensure_shutdown(nn, sc, 0); |
1704 | o2net_ensure_shutdown(nn, sc, 0); | ||
1705 | } | 1705 | } |
1706 | if (sc) | 1706 | if (sc) |
1707 | sc_put(sc); | 1707 | sc_put(sc); |
@@ -1873,12 +1873,16 @@ static int o2net_accept_one(struct socket *sock) | |||
1873 | 1873 | ||
1874 | if (o2nm_this_node() >= node->nd_num) { | 1874 | if (o2nm_this_node() >= node->nd_num) { |
1875 | local_node = o2nm_get_node_by_num(o2nm_this_node()); | 1875 | local_node = o2nm_get_node_by_num(o2nm_this_node()); |
1876 | printk(KERN_NOTICE "o2net: Unexpected connect attempt seen " | 1876 | if (local_node) |
1877 | "at node '%s' (%u, %pI4:%d) from node '%s' (%u, " | 1877 | printk(KERN_NOTICE "o2net: Unexpected connect attempt " |
1878 | "%pI4:%d)\n", local_node->nd_name, local_node->nd_num, | 1878 | "seen at node '%s' (%u, %pI4:%d) from " |
1879 | &(local_node->nd_ipv4_address), | 1879 | "node '%s' (%u, %pI4:%d)\n", |
1880 | ntohs(local_node->nd_ipv4_port), node->nd_name, | 1880 | local_node->nd_name, local_node->nd_num, |
1881 | node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port)); | 1881 | &(local_node->nd_ipv4_address), |
1882 | ntohs(local_node->nd_ipv4_port), | ||
1883 | node->nd_name, | ||
1884 | node->nd_num, &sin.sin_addr.s_addr, | ||
1885 | ntohs(sin.sin_port)); | ||
1882 | ret = -EINVAL; | 1886 | ret = -EINVAL; |
1883 | goto out; | 1887 | goto out; |
1884 | } | 1888 | } |
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index fbec0be62326..b46278f9ae44 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c | |||
@@ -292,7 +292,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, | |||
292 | struct dlm_lock *lock = NULL; | 292 | struct dlm_lock *lock = NULL; |
293 | struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; | 293 | struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; |
294 | char *name; | 294 | char *name; |
295 | struct list_head *iter, *head=NULL; | 295 | struct list_head *head = NULL; |
296 | __be64 cookie; | 296 | __be64 cookie; |
297 | u32 flags; | 297 | u32 flags; |
298 | u8 node; | 298 | u8 node; |
@@ -373,8 +373,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, | |||
373 | /* try convert queue for both ast/bast */ | 373 | /* try convert queue for both ast/bast */ |
374 | head = &res->converting; | 374 | head = &res->converting; |
375 | lock = NULL; | 375 | lock = NULL; |
376 | list_for_each(iter, head) { | 376 | list_for_each_entry(lock, head, list) { |
377 | lock = list_entry (iter, struct dlm_lock, list); | ||
378 | if (lock->ml.cookie == cookie) | 377 | if (lock->ml.cookie == cookie) |
379 | goto do_ast; | 378 | goto do_ast; |
380 | } | 379 | } |
@@ -385,8 +384,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, | |||
385 | else | 384 | else |
386 | head = &res->granted; | 385 | head = &res->granted; |
387 | 386 | ||
388 | list_for_each(iter, head) { | 387 | list_for_each_entry(lock, head, list) { |
389 | lock = list_entry (iter, struct dlm_lock, list); | ||
390 | if (lock->ml.cookie == cookie) | 388 | if (lock->ml.cookie == cookie) |
391 | goto do_ast; | 389 | goto do_ast; |
392 | } | 390 | } |
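Aside (illustration, not part of the patch): the dlmast.c hunk above and most of the dlm hunks below apply the same mechanical conversion, replacing an open-coded list_for_each()/list_entry() pair with list_for_each_entry(), which drops the struct list_head *iter cursor entirely. A minimal user-space sketch of the idiom; the real macros live in <linux/list.h>, this re-implementation exists only to show the shape:

/*
 * Minimal user-space sketch of list_for_each_entry(); illustration only,
 * the real macros live in <linux/list.h>.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* old style: walk the raw list_head nodes, convert each one by hand */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* new style: the loop variable is already the containing object */
#define list_for_each_entry(pos, head, member)                              \
	for ((pos) = container_of((head)->next, typeof(*(pos)), member);    \
	     &(pos)->member != (head);                                       \
	     (pos) = container_of((pos)->member.next, typeof(*(pos)), member))

struct lock { int cookie; struct list_head list; };

int main(void)
{
	struct list_head granted = { &granted, &granted };  /* empty queue */
	struct lock a = { .cookie = 42 };
	struct lock *tmplock;

	/* hand-rolled list_add(): put 'a' on the granted queue */
	a.list.next = granted.next;
	a.list.prev = &granted;
	granted.next->prev = &a.list;
	granted.next = &a.list;

	list_for_each_entry(tmplock, &granted, list)
		printf("cookie %d\n", tmplock->cookie);
	return 0;
}

The hlist_for_each_entry() conversions further down follow the same shape, with the entry macro hiding the hlist_entry() cast.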
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index de854cca12a2..e0517762fcc0 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -1079,11 +1079,9 @@ static inline int dlm_lock_compatible(int existing, int request) | |||
1079 | static inline int dlm_lock_on_list(struct list_head *head, | 1079 | static inline int dlm_lock_on_list(struct list_head *head, |
1080 | struct dlm_lock *lock) | 1080 | struct dlm_lock *lock) |
1081 | { | 1081 | { |
1082 | struct list_head *iter; | ||
1083 | struct dlm_lock *tmplock; | 1082 | struct dlm_lock *tmplock; |
1084 | 1083 | ||
1085 | list_for_each(iter, head) { | 1084 | list_for_each_entry(tmplock, head, list) { |
1086 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
1087 | if (tmplock == lock) | 1085 | if (tmplock == lock) |
1088 | return 1; | 1086 | return 1; |
1089 | } | 1087 | } |
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index 29a886d1e82c..e36d63ff1783 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c | |||
@@ -123,7 +123,6 @@ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, | |||
123 | int *kick_thread) | 123 | int *kick_thread) |
124 | { | 124 | { |
125 | enum dlm_status status = DLM_NORMAL; | 125 | enum dlm_status status = DLM_NORMAL; |
126 | struct list_head *iter; | ||
127 | struct dlm_lock *tmplock=NULL; | 126 | struct dlm_lock *tmplock=NULL; |
128 | 127 | ||
129 | assert_spin_locked(&res->spinlock); | 128 | assert_spin_locked(&res->spinlock); |
@@ -185,16 +184,14 @@ static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, | |||
185 | 184 | ||
186 | /* upconvert from here on */ | 185 | /* upconvert from here on */ |
187 | status = DLM_NORMAL; | 186 | status = DLM_NORMAL; |
188 | list_for_each(iter, &res->granted) { | 187 | list_for_each_entry(tmplock, &res->granted, list) { |
189 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
190 | if (tmplock == lock) | 188 | if (tmplock == lock) |
191 | continue; | 189 | continue; |
192 | if (!dlm_lock_compatible(tmplock->ml.type, type)) | 190 | if (!dlm_lock_compatible(tmplock->ml.type, type)) |
193 | goto switch_queues; | 191 | goto switch_queues; |
194 | } | 192 | } |
195 | 193 | ||
196 | list_for_each(iter, &res->converting) { | 194 | list_for_each_entry(tmplock, &res->converting, list) { |
197 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
198 | if (!dlm_lock_compatible(tmplock->ml.type, type)) | 195 | if (!dlm_lock_compatible(tmplock->ml.type, type)) |
199 | goto switch_queues; | 196 | goto switch_queues; |
200 | /* existing conversion requests take precedence */ | 197 | /* existing conversion requests take precedence */ |
@@ -424,8 +421,8 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data, | |||
424 | struct dlm_ctxt *dlm = data; | 421 | struct dlm_ctxt *dlm = data; |
425 | struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; | 422 | struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; |
426 | struct dlm_lock_resource *res = NULL; | 423 | struct dlm_lock_resource *res = NULL; |
427 | struct list_head *iter; | ||
428 | struct dlm_lock *lock = NULL; | 424 | struct dlm_lock *lock = NULL; |
425 | struct dlm_lock *tmp_lock; | ||
429 | struct dlm_lockstatus *lksb; | 426 | struct dlm_lockstatus *lksb; |
430 | enum dlm_status status = DLM_NORMAL; | 427 | enum dlm_status status = DLM_NORMAL; |
431 | u32 flags; | 428 | u32 flags; |
@@ -471,14 +468,13 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data, | |||
471 | dlm_error(status); | 468 | dlm_error(status); |
472 | goto leave; | 469 | goto leave; |
473 | } | 470 | } |
474 | list_for_each(iter, &res->granted) { | 471 | list_for_each_entry(tmp_lock, &res->granted, list) { |
475 | lock = list_entry(iter, struct dlm_lock, list); | 472 | if (tmp_lock->ml.cookie == cnv->cookie && |
476 | if (lock->ml.cookie == cnv->cookie && | 473 | tmp_lock->ml.node == cnv->node_idx) { |
477 | lock->ml.node == cnv->node_idx) { | 474 | lock = tmp_lock; |
478 | dlm_lock_get(lock); | 475 | dlm_lock_get(lock); |
479 | break; | 476 | break; |
480 | } | 477 | } |
481 | lock = NULL; | ||
482 | } | 478 | } |
483 | spin_unlock(&res->spinlock); | 479 | spin_unlock(&res->spinlock); |
484 | if (!lock) { | 480 | if (!lock) { |
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 0e28e242226d..e33cd7a3c582 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c | |||
@@ -96,7 +96,6 @@ static void __dlm_print_lock(struct dlm_lock *lock) | |||
96 | 96 | ||
97 | void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) | 97 | void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) |
98 | { | 98 | { |
99 | struct list_head *iter2; | ||
100 | struct dlm_lock *lock; | 99 | struct dlm_lock *lock; |
101 | char buf[DLM_LOCKID_NAME_MAX]; | 100 | char buf[DLM_LOCKID_NAME_MAX]; |
102 | 101 | ||
@@ -118,18 +117,15 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) | |||
118 | res->inflight_locks, atomic_read(&res->asts_reserved)); | 117 | res->inflight_locks, atomic_read(&res->asts_reserved)); |
119 | dlm_print_lockres_refmap(res); | 118 | dlm_print_lockres_refmap(res); |
120 | printk(" granted queue:\n"); | 119 | printk(" granted queue:\n"); |
121 | list_for_each(iter2, &res->granted) { | 120 | list_for_each_entry(lock, &res->granted, list) { |
122 | lock = list_entry(iter2, struct dlm_lock, list); | ||
123 | __dlm_print_lock(lock); | 121 | __dlm_print_lock(lock); |
124 | } | 122 | } |
125 | printk(" converting queue:\n"); | 123 | printk(" converting queue:\n"); |
126 | list_for_each(iter2, &res->converting) { | 124 | list_for_each_entry(lock, &res->converting, list) { |
127 | lock = list_entry(iter2, struct dlm_lock, list); | ||
128 | __dlm_print_lock(lock); | 125 | __dlm_print_lock(lock); |
129 | } | 126 | } |
130 | printk(" blocked queue:\n"); | 127 | printk(" blocked queue:\n"); |
131 | list_for_each(iter2, &res->blocked) { | 128 | list_for_each_entry(lock, &res->blocked, list) { |
132 | lock = list_entry(iter2, struct dlm_lock, list); | ||
133 | __dlm_print_lock(lock); | 129 | __dlm_print_lock(lock); |
134 | } | 130 | } |
135 | } | 131 | } |
@@ -446,7 +442,6 @@ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len) | |||
446 | { | 442 | { |
447 | struct dlm_master_list_entry *mle; | 443 | struct dlm_master_list_entry *mle; |
448 | struct hlist_head *bucket; | 444 | struct hlist_head *bucket; |
449 | struct hlist_node *list; | ||
450 | int i, out = 0; | 445 | int i, out = 0; |
451 | unsigned long total = 0, longest = 0, bucket_count = 0; | 446 | unsigned long total = 0, longest = 0, bucket_count = 0; |
452 | 447 | ||
@@ -456,9 +451,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len) | |||
456 | spin_lock(&dlm->master_lock); | 451 | spin_lock(&dlm->master_lock); |
457 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | 452 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
458 | bucket = dlm_master_hash(dlm, i); | 453 | bucket = dlm_master_hash(dlm, i); |
459 | hlist_for_each(list, bucket) { | 454 | hlist_for_each_entry(mle, bucket, master_hash_node) { |
460 | mle = hlist_entry(list, struct dlm_master_list_entry, | ||
461 | master_hash_node); | ||
462 | ++total; | 455 | ++total; |
463 | ++bucket_count; | 456 | ++bucket_count; |
464 | if (len - out < 200) | 457 | if (len - out < 200) |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index dbb17c07656a..8b3382abf840 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -193,7 +193,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, | |||
193 | unsigned int hash) | 193 | unsigned int hash) |
194 | { | 194 | { |
195 | struct hlist_head *bucket; | 195 | struct hlist_head *bucket; |
196 | struct hlist_node *list; | 196 | struct dlm_lock_resource *res; |
197 | 197 | ||
198 | mlog(0, "%.*s\n", len, name); | 198 | mlog(0, "%.*s\n", len, name); |
199 | 199 | ||
@@ -201,9 +201,7 @@ struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, | |||
201 | 201 | ||
202 | bucket = dlm_lockres_hash(dlm, hash); | 202 | bucket = dlm_lockres_hash(dlm, hash); |
203 | 203 | ||
204 | hlist_for_each(list, bucket) { | 204 | hlist_for_each_entry(res, bucket, hash_node) { |
205 | struct dlm_lock_resource *res = hlist_entry(list, | ||
206 | struct dlm_lock_resource, hash_node); | ||
207 | if (res->lockname.name[0] != name[0]) | 205 | if (res->lockname.name[0] != name[0]) |
208 | continue; | 206 | continue; |
209 | if (unlikely(res->lockname.len != len)) | 207 | if (unlikely(res->lockname.len != len)) |
@@ -262,22 +260,19 @@ struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, | |||
262 | 260 | ||
263 | static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) | 261 | static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) |
264 | { | 262 | { |
265 | struct dlm_ctxt *tmp = NULL; | 263 | struct dlm_ctxt *tmp; |
266 | struct list_head *iter; | ||
267 | 264 | ||
268 | assert_spin_locked(&dlm_domain_lock); | 265 | assert_spin_locked(&dlm_domain_lock); |
269 | 266 | ||
270 | /* tmp->name here is always NULL terminated, | 267 | /* tmp->name here is always NULL terminated, |
271 | * but domain may not be! */ | 268 | * but domain may not be! */ |
272 | list_for_each(iter, &dlm_domains) { | 269 | list_for_each_entry(tmp, &dlm_domains, list) { |
273 | tmp = list_entry (iter, struct dlm_ctxt, list); | ||
274 | if (strlen(tmp->name) == len && | 270 | if (strlen(tmp->name) == len && |
275 | memcmp(tmp->name, domain, len)==0) | 271 | memcmp(tmp->name, domain, len)==0) |
276 | break; | 272 | return tmp; |
277 | tmp = NULL; | ||
278 | } | 273 | } |
279 | 274 | ||
280 | return tmp; | 275 | return NULL; |
281 | } | 276 | } |
282 | 277 | ||
283 | /* For null terminated domain strings ONLY */ | 278 | /* For null terminated domain strings ONLY */ |
@@ -366,25 +361,22 @@ static void __dlm_get(struct dlm_ctxt *dlm) | |||
366 | * you shouldn't trust your pointer. */ | 361 | * you shouldn't trust your pointer. */ |
367 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm) | 362 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm) |
368 | { | 363 | { |
369 | struct list_head *iter; | 364 | struct dlm_ctxt *target; |
370 | struct dlm_ctxt *target = NULL; | 365 | struct dlm_ctxt *ret = NULL; |
371 | 366 | ||
372 | spin_lock(&dlm_domain_lock); | 367 | spin_lock(&dlm_domain_lock); |
373 | 368 | ||
374 | list_for_each(iter, &dlm_domains) { | 369 | list_for_each_entry(target, &dlm_domains, list) { |
375 | target = list_entry (iter, struct dlm_ctxt, list); | ||
376 | |||
377 | if (target == dlm) { | 370 | if (target == dlm) { |
378 | __dlm_get(target); | 371 | __dlm_get(target); |
372 | ret = target; | ||
379 | break; | 373 | break; |
380 | } | 374 | } |
381 | |||
382 | target = NULL; | ||
383 | } | 375 | } |
384 | 376 | ||
385 | spin_unlock(&dlm_domain_lock); | 377 | spin_unlock(&dlm_domain_lock); |
386 | 378 | ||
387 | return target; | 379 | return ret; |
388 | } | 380 | } |
389 | 381 | ||
390 | int dlm_domain_fully_joined(struct dlm_ctxt *dlm) | 382 | int dlm_domain_fully_joined(struct dlm_ctxt *dlm) |
@@ -2296,13 +2288,10 @@ static DECLARE_RWSEM(dlm_callback_sem); | |||
2296 | void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, | 2288 | void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, |
2297 | int node_num) | 2289 | int node_num) |
2298 | { | 2290 | { |
2299 | struct list_head *iter; | ||
2300 | struct dlm_eviction_cb *cb; | 2291 | struct dlm_eviction_cb *cb; |
2301 | 2292 | ||
2302 | down_read(&dlm_callback_sem); | 2293 | down_read(&dlm_callback_sem); |
2303 | list_for_each(iter, &dlm->dlm_eviction_callbacks) { | 2294 | list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) { |
2304 | cb = list_entry(iter, struct dlm_eviction_cb, ec_item); | ||
2305 | |||
2306 | cb->ec_func(node_num, cb->ec_data); | 2295 | cb->ec_func(node_num, cb->ec_data); |
2307 | } | 2296 | } |
2308 | up_read(&dlm_callback_sem); | 2297 | up_read(&dlm_callback_sem); |
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 47e67c2d228f..5d32f7511f74 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -91,19 +91,14 @@ void dlm_destroy_lock_cache(void) | |||
91 | static int dlm_can_grant_new_lock(struct dlm_lock_resource *res, | 91 | static int dlm_can_grant_new_lock(struct dlm_lock_resource *res, |
92 | struct dlm_lock *lock) | 92 | struct dlm_lock *lock) |
93 | { | 93 | { |
94 | struct list_head *iter; | ||
95 | struct dlm_lock *tmplock; | 94 | struct dlm_lock *tmplock; |
96 | 95 | ||
97 | list_for_each(iter, &res->granted) { | 96 | list_for_each_entry(tmplock, &res->granted, list) { |
98 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
99 | |||
100 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) | 97 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) |
101 | return 0; | 98 | return 0; |
102 | } | 99 | } |
103 | 100 | ||
104 | list_for_each(iter, &res->converting) { | 101 | list_for_each_entry(tmplock, &res->converting, list) { |
105 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
106 | |||
107 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) | 102 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) |
108 | return 0; | 103 | return 0; |
109 | if (!dlm_lock_compatible(tmplock->ml.convert_type, | 104 | if (!dlm_lock_compatible(tmplock->ml.convert_type, |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 33ecbe0e6734..cf0f103963b1 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -342,16 +342,13 @@ static int dlm_find_mle(struct dlm_ctxt *dlm, | |||
342 | { | 342 | { |
343 | struct dlm_master_list_entry *tmpmle; | 343 | struct dlm_master_list_entry *tmpmle; |
344 | struct hlist_head *bucket; | 344 | struct hlist_head *bucket; |
345 | struct hlist_node *list; | ||
346 | unsigned int hash; | 345 | unsigned int hash; |
347 | 346 | ||
348 | assert_spin_locked(&dlm->master_lock); | 347 | assert_spin_locked(&dlm->master_lock); |
349 | 348 | ||
350 | hash = dlm_lockid_hash(name, namelen); | 349 | hash = dlm_lockid_hash(name, namelen); |
351 | bucket = dlm_master_hash(dlm, hash); | 350 | bucket = dlm_master_hash(dlm, hash); |
352 | hlist_for_each(list, bucket) { | 351 | hlist_for_each_entry(tmpmle, bucket, master_hash_node) { |
353 | tmpmle = hlist_entry(list, struct dlm_master_list_entry, | ||
354 | master_hash_node); | ||
355 | if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) | 352 | if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) |
356 | continue; | 353 | continue; |
357 | dlm_get_mle(tmpmle); | 354 | dlm_get_mle(tmpmle); |
@@ -3183,7 +3180,7 @@ void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) | |||
3183 | struct dlm_master_list_entry *mle; | 3180 | struct dlm_master_list_entry *mle; |
3184 | struct dlm_lock_resource *res; | 3181 | struct dlm_lock_resource *res; |
3185 | struct hlist_head *bucket; | 3182 | struct hlist_head *bucket; |
3186 | struct hlist_node *list; | 3183 | struct hlist_node *tmp; |
3187 | unsigned int i; | 3184 | unsigned int i; |
3188 | 3185 | ||
3189 | mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); | 3186 | mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); |
@@ -3194,10 +3191,7 @@ top: | |||
3194 | spin_lock(&dlm->master_lock); | 3191 | spin_lock(&dlm->master_lock); |
3195 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | 3192 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
3196 | bucket = dlm_master_hash(dlm, i); | 3193 | bucket = dlm_master_hash(dlm, i); |
3197 | hlist_for_each(list, bucket) { | 3194 | hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { |
3198 | mle = hlist_entry(list, struct dlm_master_list_entry, | ||
3199 | master_hash_node); | ||
3200 | |||
3201 | BUG_ON(mle->type != DLM_MLE_BLOCK && | 3195 | BUG_ON(mle->type != DLM_MLE_BLOCK && |
3202 | mle->type != DLM_MLE_MASTER && | 3196 | mle->type != DLM_MLE_MASTER && |
3203 | mle->type != DLM_MLE_MIGRATION); | 3197 | mle->type != DLM_MLE_MIGRATION); |
@@ -3378,7 +3372,7 @@ void dlm_force_free_mles(struct dlm_ctxt *dlm) | |||
3378 | int i; | 3372 | int i; |
3379 | struct hlist_head *bucket; | 3373 | struct hlist_head *bucket; |
3380 | struct dlm_master_list_entry *mle; | 3374 | struct dlm_master_list_entry *mle; |
3381 | struct hlist_node *tmp, *list; | 3375 | struct hlist_node *tmp; |
3382 | 3376 | ||
3383 | /* | 3377 | /* |
3384 | * We notified all other nodes that we are exiting the domain and | 3378 | * We notified all other nodes that we are exiting the domain and |
@@ -3394,9 +3388,7 @@ void dlm_force_free_mles(struct dlm_ctxt *dlm) | |||
3394 | 3388 | ||
3395 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | 3389 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
3396 | bucket = dlm_master_hash(dlm, i); | 3390 | bucket = dlm_master_hash(dlm, i); |
3397 | hlist_for_each_safe(list, tmp, bucket) { | 3391 | hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { |
3398 | mle = hlist_entry(list, struct dlm_master_list_entry, | ||
3399 | master_hash_node); | ||
3400 | if (mle->type != DLM_MLE_BLOCK) { | 3392 | if (mle->type != DLM_MLE_BLOCK) { |
3401 | mlog(ML_ERROR, "bad mle: %p\n", mle); | 3393 | mlog(ML_ERROR, "bad mle: %p\n", mle); |
3402 | dlm_print_one_mle(mle); | 3394 | dlm_print_one_mle(mle); |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 773bd32bfd8c..0b5adca1b178 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -787,6 +787,7 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | |||
787 | { | 787 | { |
788 | struct dlm_lock_request lr; | 788 | struct dlm_lock_request lr; |
789 | int ret; | 789 | int ret; |
790 | int status; | ||
790 | 791 | ||
791 | mlog(0, "\n"); | 792 | mlog(0, "\n"); |
792 | 793 | ||
@@ -800,13 +801,15 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | |||
800 | 801 | ||
801 | // send message | 802 | // send message |
802 | ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, | 803 | ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, |
803 | &lr, sizeof(lr), request_from, NULL); | 804 | &lr, sizeof(lr), request_from, &status); |
804 | 805 | ||
805 | /* negative status is handled by caller */ | 806 | /* negative status is handled by caller */ |
806 | if (ret < 0) | 807 | if (ret < 0) |
807 | mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u " | 808 | mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u " |
808 | "to recover dead node %u\n", dlm->name, ret, | 809 | "to recover dead node %u\n", dlm->name, ret, |
809 | request_from, dead_node); | 810 | request_from, dead_node); |
811 | else | ||
812 | ret = status; | ||
810 | // return from here, then | 813 | // return from here, then |
811 | // sleep until all received or error | 814 | // sleep until all received or error |
812 | return ret; | 815 | return ret; |
@@ -2328,6 +2331,14 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |||
2328 | } else if (res->owner == dlm->node_num) { | 2331 | } else if (res->owner == dlm->node_num) { |
2329 | dlm_free_dead_locks(dlm, res, dead_node); | 2332 | dlm_free_dead_locks(dlm, res, dead_node); |
2330 | __dlm_lockres_calc_usage(dlm, res); | 2333 | __dlm_lockres_calc_usage(dlm, res); |
2334 | } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
2335 | if (test_bit(dead_node, res->refmap)) { | ||
2336 | mlog(0, "%s:%.*s: dead node %u had a ref, but had " | ||
2337 | "no locks and had not purged before dying\n", | ||
2338 | dlm->name, res->lockname.len, | ||
2339 | res->lockname.name, dead_node); | ||
2340 | dlm_lockres_clear_refmap_bit(dlm, res, dead_node); | ||
2341 | } | ||
2331 | } | 2342 | } |
2332 | spin_unlock(&res->spinlock); | 2343 | spin_unlock(&res->spinlock); |
2333 | } | 2344 | } |
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index e73c833fc2a1..9db869de829d 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c | |||
@@ -286,8 +286,6 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm, | |||
286 | struct dlm_lock_resource *res) | 286 | struct dlm_lock_resource *res) |
287 | { | 287 | { |
288 | struct dlm_lock *lock, *target; | 288 | struct dlm_lock *lock, *target; |
289 | struct list_head *iter; | ||
290 | struct list_head *head; | ||
291 | int can_grant = 1; | 289 | int can_grant = 1; |
292 | 290 | ||
293 | /* | 291 | /* |
@@ -314,9 +312,7 @@ converting: | |||
314 | dlm->name, res->lockname.len, res->lockname.name); | 312 | dlm->name, res->lockname.len, res->lockname.name); |
315 | BUG(); | 313 | BUG(); |
316 | } | 314 | } |
317 | head = &res->granted; | 315 | list_for_each_entry(lock, &res->granted, list) { |
318 | list_for_each(iter, head) { | ||
319 | lock = list_entry(iter, struct dlm_lock, list); | ||
320 | if (lock==target) | 316 | if (lock==target) |
321 | continue; | 317 | continue; |
322 | if (!dlm_lock_compatible(lock->ml.type, | 318 | if (!dlm_lock_compatible(lock->ml.type, |
@@ -333,9 +329,8 @@ converting: | |||
333 | target->ml.convert_type; | 329 | target->ml.convert_type; |
334 | } | 330 | } |
335 | } | 331 | } |
336 | head = &res->converting; | 332 | |
337 | list_for_each(iter, head) { | 333 | list_for_each_entry(lock, &res->converting, list) { |
338 | lock = list_entry(iter, struct dlm_lock, list); | ||
339 | if (lock==target) | 334 | if (lock==target) |
340 | continue; | 335 | continue; |
341 | if (!dlm_lock_compatible(lock->ml.type, | 336 | if (!dlm_lock_compatible(lock->ml.type, |
@@ -384,9 +379,7 @@ blocked: | |||
384 | goto leave; | 379 | goto leave; |
385 | target = list_entry(res->blocked.next, struct dlm_lock, list); | 380 | target = list_entry(res->blocked.next, struct dlm_lock, list); |
386 | 381 | ||
387 | head = &res->granted; | 382 | list_for_each_entry(lock, &res->granted, list) { |
388 | list_for_each(iter, head) { | ||
389 | lock = list_entry(iter, struct dlm_lock, list); | ||
390 | if (lock==target) | 383 | if (lock==target) |
391 | continue; | 384 | continue; |
392 | if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { | 385 | if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { |
@@ -400,9 +393,7 @@ blocked: | |||
400 | } | 393 | } |
401 | } | 394 | } |
402 | 395 | ||
403 | head = &res->converting; | 396 | list_for_each_entry(lock, &res->converting, list) { |
404 | list_for_each(iter, head) { | ||
405 | lock = list_entry(iter, struct dlm_lock, list); | ||
406 | if (lock==target) | 397 | if (lock==target) |
407 | continue; | 398 | continue; |
408 | if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { | 399 | if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { |
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 850aa7e87537..5698b52cf5c9 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c | |||
@@ -388,7 +388,6 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data, | |||
388 | struct dlm_ctxt *dlm = data; | 388 | struct dlm_ctxt *dlm = data; |
389 | struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf; | 389 | struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf; |
390 | struct dlm_lock_resource *res = NULL; | 390 | struct dlm_lock_resource *res = NULL; |
391 | struct list_head *iter; | ||
392 | struct dlm_lock *lock = NULL; | 391 | struct dlm_lock *lock = NULL; |
393 | enum dlm_status status = DLM_NORMAL; | 392 | enum dlm_status status = DLM_NORMAL; |
394 | int found = 0, i; | 393 | int found = 0, i; |
@@ -458,8 +457,7 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data, | |||
458 | } | 457 | } |
459 | 458 | ||
460 | for (i=0; i<3; i++) { | 459 | for (i=0; i<3; i++) { |
461 | list_for_each(iter, queue) { | 460 | list_for_each_entry(lock, queue, list) { |
462 | lock = list_entry(iter, struct dlm_lock, list); | ||
463 | if (lock->ml.cookie == unlock->cookie && | 461 | if (lock->ml.cookie == unlock->cookie && |
464 | lock->ml.node == unlock->node_idx) { | 462 | lock->ml.node == unlock->node_idx) { |
465 | dlm_lock_get(lock); | 463 | dlm_lock_get(lock); |
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 12bafb7265ce..efa2b3d339e3 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c | |||
@@ -401,11 +401,8 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb) | |||
401 | { | 401 | { |
402 | struct inode *inode = new_inode(sb); | 402 | struct inode *inode = new_inode(sb); |
403 | umode_t mode = S_IFDIR | 0755; | 403 | umode_t mode = S_IFDIR | 0755; |
404 | struct dlmfs_inode_private *ip; | ||
405 | 404 | ||
406 | if (inode) { | 405 | if (inode) { |
407 | ip = DLMFS_I(inode); | ||
408 | |||
409 | inode->i_ino = get_next_ino(); | 406 | inode->i_ino = get_next_ino(); |
410 | inode_init_owner(inode, NULL, mode); | 407 | inode_init_owner(inode, NULL, mode); |
411 | inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; | 408 | inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; |
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 2487116d0d33..767370b656ca 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c | |||
@@ -781,7 +781,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
781 | cpos = map_start >> osb->s_clustersize_bits; | 781 | cpos = map_start >> osb->s_clustersize_bits; |
782 | mapping_end = ocfs2_clusters_for_bytes(inode->i_sb, | 782 | mapping_end = ocfs2_clusters_for_bytes(inode->i_sb, |
783 | map_start + map_len); | 783 | map_start + map_len); |
784 | mapping_end -= cpos; | ||
785 | is_last = 0; | 784 | is_last = 0; |
786 | while (cpos < mapping_end && !is_last) { | 785 | while (cpos < mapping_end && !is_last) { |
787 | u32 fe_flags; | 786 | u32 fe_flags; |
@@ -852,20 +851,20 @@ int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence) | |||
852 | 851 | ||
853 | down_read(&OCFS2_I(inode)->ip_alloc_sem); | 852 | down_read(&OCFS2_I(inode)->ip_alloc_sem); |
854 | 853 | ||
855 | if (*offset >= inode->i_size) { | 854 | if (*offset >= i_size_read(inode)) { |
856 | ret = -ENXIO; | 855 | ret = -ENXIO; |
857 | goto out_unlock; | 856 | goto out_unlock; |
858 | } | 857 | } |
859 | 858 | ||
860 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { | 859 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { |
861 | if (whence == SEEK_HOLE) | 860 | if (whence == SEEK_HOLE) |
862 | *offset = inode->i_size; | 861 | *offset = i_size_read(inode); |
863 | goto out_unlock; | 862 | goto out_unlock; |
864 | } | 863 | } |
865 | 864 | ||
866 | clen = 0; | 865 | clen = 0; |
867 | cpos = *offset >> cs_bits; | 866 | cpos = *offset >> cs_bits; |
868 | cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size); | 867 | cend = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)); |
869 | 868 | ||
870 | while (cpos < cend && !is_last) { | 869 | while (cpos < cend && !is_last) { |
871 | ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size, | 870 | ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size, |
@@ -904,8 +903,8 @@ int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence) | |||
904 | extlen = clen; | 903 | extlen = clen; |
905 | extlen <<= cs_bits; | 904 | extlen <<= cs_bits; |
906 | 905 | ||
907 | if ((extoff + extlen) > inode->i_size) | 906 | if ((extoff + extlen) > i_size_read(inode)) |
908 | extlen = inode->i_size - extoff; | 907 | extlen = i_size_read(inode) - extoff; |
909 | extoff += extlen; | 908 | extoff += extlen; |
910 | if (extoff > *offset) | 909 | if (extoff > *offset) |
911 | *offset = extoff; | 910 | *offset = extoff; |
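Aside (illustration, not part of the patch): the ocfs2_fiemap() change above drops the mapping_end -= cpos adjustment because both cpos and mapping_end are absolute cluster offsets and the loop compares them directly; subtracting cpos turned the bound into a length, so any request that did not start at offset zero terminated early. A tiny arithmetic demo, where the 4 KiB cluster size and the request values are made up for the example:

/* Why subtracting cpos from mapping_end broke the fiemap loop bound. */
#include <stdio.h>

int main(void)
{
	unsigned int cs_bits = 12;                 /* 4 KiB clusters, for the example */
	unsigned long long map_start = 1 << 20;    /* request starts at 1 MiB */
	unsigned long long map_len = 8 << 12;      /* ... and covers 8 clusters */

	unsigned int cpos = map_start >> cs_bits;                      /* 256 */
	unsigned int mapping_end = (map_start + map_len) >> cs_bits;   /* 264 */

	printf("correct bound: loop runs while %u < %u\n", cpos, mapping_end);
	printf("broken bound:  loop runs while %u < %u (never enters)\n",
	       cpos, mapping_end - cpos);
	return 0;
}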
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 3261d71319ee..4f8197caa487 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -671,11 +671,7 @@ restarted_transaction: | |||
671 | } else { | 671 | } else { |
672 | BUG_ON(why != RESTART_TRANS); | 672 | BUG_ON(why != RESTART_TRANS); |
673 | 673 | ||
674 | /* TODO: This can be more intelligent. */ | 674 | status = ocfs2_allocate_extend_trans(handle, 1); |
675 | credits = ocfs2_calc_extend_credits(osb->sb, | ||
676 | &fe->id2.i_list, | ||
677 | clusters_to_add); | ||
678 | status = ocfs2_extend_trans(handle, credits); | ||
679 | if (status < 0) { | 675 | if (status < 0) { |
680 | /* handle still has to be committed at | 676 | /* handle still has to be committed at |
681 | * this point. */ | 677 | * this point. */ |
@@ -1800,6 +1796,7 @@ static int ocfs2_remove_inode_range(struct inode *inode, | |||
1800 | ocfs2_truncate_cluster_pages(inode, byte_start, byte_len); | 1796 | ocfs2_truncate_cluster_pages(inode, byte_start, byte_len); |
1801 | 1797 | ||
1802 | out: | 1798 | out: |
1799 | ocfs2_free_path(path); | ||
1803 | ocfs2_schedule_truncate_log_flush(osb, 1); | 1800 | ocfs2_schedule_truncate_log_flush(osb, 1); |
1804 | ocfs2_run_deallocs(osb, &dealloc); | 1801 | ocfs2_run_deallocs(osb, &dealloc); |
1805 | 1802 | ||
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 0c60ef2d8056..fa32ce9b455d 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c | |||
@@ -303,7 +303,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode, | |||
303 | if (o2info_from_user(oij, req)) | 303 | if (o2info_from_user(oij, req)) |
304 | goto bail; | 304 | goto bail; |
305 | 305 | ||
306 | oij.ij_journal_size = osb->journal->j_inode->i_size; | 306 | oij.ij_journal_size = i_size_read(osb->journal->j_inode); |
307 | 307 | ||
308 | o2info_set_request_filled(&oij.ij_req); | 308 | o2info_set_request_filled(&oij.ij_req); |
309 | 309 | ||
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 242170d83971..44fc3e530c3d 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -455,6 +455,41 @@ bail: | |||
455 | return status; | 455 | return status; |
456 | } | 456 | } |
457 | 457 | ||
458 | /* | ||
459 | * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA. | ||
460 | * If that fails, restart the transaction & regain write access for the | ||
461 | * buffer head which is used for metadata modifications. | ||
462 | * Taken from Ext4: extend_or_restart_transaction() | ||
463 | */ | ||
464 | int ocfs2_allocate_extend_trans(handle_t *handle, int thresh) | ||
465 | { | ||
466 | int status, old_nblks; | ||
467 | |||
468 | BUG_ON(!handle); | ||
469 | |||
470 | old_nblks = handle->h_buffer_credits; | ||
471 | trace_ocfs2_allocate_extend_trans(old_nblks, thresh); | ||
472 | |||
473 | if (old_nblks < thresh) | ||
474 | return 0; | ||
475 | |||
476 | status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA); | ||
477 | if (status < 0) { | ||
478 | mlog_errno(status); | ||
479 | goto bail; | ||
480 | } | ||
481 | |||
482 | if (status > 0) { | ||
483 | status = jbd2_journal_restart(handle, OCFS2_MAX_TRANS_DATA); | ||
484 | if (status < 0) | ||
485 | mlog_errno(status); | ||
486 | } | ||
487 | |||
488 | bail: | ||
489 | return status; | ||
490 | } | ||
491 | |||
492 | |||
458 | struct ocfs2_triggers { | 493 | struct ocfs2_triggers { |
459 | struct jbd2_buffer_trigger_type ot_triggers; | 494 | struct jbd2_buffer_trigger_type ot_triggers; |
460 | int ot_offset; | 495 | int ot_offset; |
@@ -801,14 +836,14 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) | |||
801 | inode_lock = 1; | 836 | inode_lock = 1; |
802 | di = (struct ocfs2_dinode *)bh->b_data; | 837 | di = (struct ocfs2_dinode *)bh->b_data; |
803 | 838 | ||
804 | if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) { | 839 | if (i_size_read(inode) < OCFS2_MIN_JOURNAL_SIZE) { |
805 | mlog(ML_ERROR, "Journal file size (%lld) is too small!\n", | 840 | mlog(ML_ERROR, "Journal file size (%lld) is too small!\n", |
806 | inode->i_size); | 841 | i_size_read(inode)); |
807 | status = -EINVAL; | 842 | status = -EINVAL; |
808 | goto done; | 843 | goto done; |
809 | } | 844 | } |
810 | 845 | ||
811 | trace_ocfs2_journal_init(inode->i_size, | 846 | trace_ocfs2_journal_init(i_size_read(inode), |
812 | (unsigned long long)inode->i_blocks, | 847 | (unsigned long long)inode->i_blocks, |
813 | OCFS2_I(inode)->ip_clusters); | 848 | OCFS2_I(inode)->ip_clusters); |
814 | 849 | ||
@@ -1096,7 +1131,7 @@ static int ocfs2_force_read_journal(struct inode *inode) | |||
1096 | 1131 | ||
1097 | memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); | 1132 | memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); |
1098 | 1133 | ||
1099 | num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size); | 1134 | num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); |
1100 | v_blkno = 0; | 1135 | v_blkno = 0; |
1101 | while (v_blkno < num_blocks) { | 1136 | while (v_blkno < num_blocks) { |
1102 | status = ocfs2_extent_map_get_blocks(inode, v_blkno, | 1137 | status = ocfs2_extent_map_get_blocks(inode, v_blkno, |
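Aside (illustration, not part of the patch): ocfs2_allocate_extend_trans() above borrows ext4's extend-or-restart flow, asking jbd2 for more credits and, when jbd2_journal_extend() reports no room (a positive return), restarting the handle via jbd2_journal_restart() with a fresh budget. A toy user-space model of that control flow; the struct, JOURNAL_ROOM, and the stand-in journal functions are invented for the sketch:

/*
 * Toy model of the extend-or-restart flow; not kernel code.
 * The real calls are jbd2_journal_extend()/jbd2_journal_restart().
 */
#include <stdio.h>

#define MAX_TRANS_DATA 64   /* stand-in for OCFS2_MAX_TRANS_DATA */
#define JOURNAL_ROOM   100  /* pretend credits the journal can still grant */

struct handle { int credits; };

/* 0: extended in place, >0: no room so the caller must restart, <0: error */
static int journal_extend(struct handle *h, int nblocks)
{
	if (h->credits + nblocks > JOURNAL_ROOM)
		return 1;
	h->credits += nblocks;
	return 0;
}

static int journal_restart(struct handle *h, int nblocks)
{
	h->credits = nblocks;   /* pretend-commit, then start a fresh handle */
	return 0;
}

/* mirrors the shape of ocfs2_allocate_extend_trans() in the hunk above */
static int allocate_extend_trans(struct handle *h, int thresh)
{
	int status;

	if (h->credits < thresh)
		return 0;

	status = journal_extend(h, MAX_TRANS_DATA);
	if (status > 0)
		status = journal_restart(h, MAX_TRANS_DATA);
	return status;
}

int main(void)
{
	struct handle h = { .credits = 80 };
	/* 80 + 64 > 100, so the toy journal forces a restart down to 64 credits */
	int status = allocate_extend_trans(&h, 1);

	printf("status=%d credits=%d\n", status, h.credits);
	return 0;
}

OCFS2_MAX_TRANS_DATA, added in the journal.h hunk below, is the optimistic chunk size the real function requests each time.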
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 0a992737dcaf..0b479bab3671 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -258,6 +258,17 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb, | |||
258 | int ocfs2_commit_trans(struct ocfs2_super *osb, | 258 | int ocfs2_commit_trans(struct ocfs2_super *osb, |
259 | handle_t *handle); | 259 | handle_t *handle); |
260 | int ocfs2_extend_trans(handle_t *handle, int nblocks); | 260 | int ocfs2_extend_trans(handle_t *handle, int nblocks); |
261 | int ocfs2_allocate_extend_trans(handle_t *handle, | ||
262 | int thresh); | ||
263 | |||
264 | /* | ||
265 | * Define an arbitrary limit for the amount of data we will anticipate | ||
266 | * writing to any given transaction. For unbounded transactions such as | ||
267 | * fallocate(2) we can write more than this, but we always | ||
268 | * start off at the maximum transaction size and grow the transaction | ||
269 | * optimistically as we go. | ||
270 | */ | ||
271 | #define OCFS2_MAX_TRANS_DATA 64U | ||
261 | 272 | ||
262 | /* | 273 | /* |
263 | * Create access is for when we get a newly created buffer and we're | 274 | * Create access is for when we get a newly created buffer and we're |
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index aebeacd807c3..cd5496b7a0a3 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c | |||
@@ -1082,7 +1082,7 @@ static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb, | |||
1082 | } | 1082 | } |
1083 | 1083 | ||
1084 | retry_enospc: | 1084 | retry_enospc: |
1085 | (*ac)->ac_bits_wanted = osb->local_alloc_default_bits; | 1085 | (*ac)->ac_bits_wanted = osb->local_alloc_bits; |
1086 | status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); | 1086 | status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); |
1087 | if (status == -ENOSPC) { | 1087 | if (status == -ENOSPC) { |
1088 | if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) == | 1088 | if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) == |
@@ -1154,7 +1154,7 @@ retry_enospc: | |||
1154 | OCFS2_LA_DISABLED) | 1154 | OCFS2_LA_DISABLED) |
1155 | goto bail; | 1155 | goto bail; |
1156 | 1156 | ||
1157 | ac->ac_bits_wanted = osb->local_alloc_default_bits; | 1157 | ac->ac_bits_wanted = osb->local_alloc_bits; |
1158 | status = ocfs2_claim_clusters(handle, ac, | 1158 | status = ocfs2_claim_clusters(handle, ac, |
1159 | osb->local_alloc_bits, | 1159 | osb->local_alloc_bits, |
1160 | &cluster_off, | 1160 | &cluster_off, |
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index 452068b45749..3d3f3c83065c 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c | |||
@@ -152,6 +152,7 @@ static int __ocfs2_move_extent(handle_t *handle, | |||
152 | } | 152 | } |
153 | 153 | ||
154 | out: | 154 | out: |
155 | ocfs2_free_path(path); | ||
155 | return ret; | 156 | return ret; |
156 | } | 157 | } |
157 | 158 | ||
@@ -845,7 +846,7 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh, | |||
845 | struct ocfs2_move_extents *range = context->range; | 846 | struct ocfs2_move_extents *range = context->range; |
846 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 847 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
847 | 848 | ||
848 | if ((inode->i_size == 0) || (range->me_len == 0)) | 849 | if ((i_size_read(inode) == 0) || (range->me_len == 0)) |
849 | return 0; | 850 | return 0; |
850 | 851 | ||
851 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) | 852 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) |
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h index 3b481f490633..1b60c62aa9d6 100644 --- a/fs/ocfs2/ocfs2_trace.h +++ b/fs/ocfs2/ocfs2_trace.h | |||
@@ -2579,6 +2579,8 @@ DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans); | |||
2579 | 2579 | ||
2580 | DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart); | 2580 | DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart); |
2581 | 2581 | ||
2582 | DEFINE_OCFS2_INT_INT_EVENT(ocfs2_allocate_extend_trans); | ||
2583 | |||
2582 | DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access); | 2584 | DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access); |
2583 | 2585 | ||
2584 | DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty); | 2586 | DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty); |
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 332a281f217e..aaa50611ec66 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
@@ -234,7 +234,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, | |||
234 | len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset; | 234 | len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset; |
235 | } | 235 | } |
236 | 236 | ||
237 | if (gqinode->i_size < off + len) { | 237 | if (i_size_read(gqinode) < off + len) { |
238 | loff_t rounded_end = | 238 | loff_t rounded_end = |
239 | ocfs2_align_bytes_to_blocks(sb, off + len); | 239 | ocfs2_align_bytes_to_blocks(sb, off + len); |
240 | 240 | ||
@@ -778,8 +778,8 @@ static int ocfs2_acquire_dquot(struct dquot *dquot) | |||
778 | */ | 778 | */ |
779 | WARN_ON(journal_current_handle()); | 779 | WARN_ON(journal_current_handle()); |
780 | status = ocfs2_extend_no_holes(gqinode, NULL, | 780 | status = ocfs2_extend_no_holes(gqinode, NULL, |
781 | gqinode->i_size + (need_alloc << sb->s_blocksize_bits), | 781 | i_size_read(gqinode) + (need_alloc << sb->s_blocksize_bits), |
782 | gqinode->i_size); | 782 | i_size_read(gqinode)); |
783 | if (status < 0) | 783 | if (status < 0) |
784 | goto out_dq; | 784 | goto out_dq; |
785 | } | 785 | } |
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 27fe7ee4874c..2e4344be3b96 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c | |||
@@ -982,14 +982,14 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( | |||
982 | 982 | ||
983 | /* We are protected by dqio_sem so no locking needed */ | 983 | /* We are protected by dqio_sem so no locking needed */ |
984 | status = ocfs2_extend_no_holes(lqinode, NULL, | 984 | status = ocfs2_extend_no_holes(lqinode, NULL, |
985 | lqinode->i_size + 2 * sb->s_blocksize, | 985 | i_size_read(lqinode) + 2 * sb->s_blocksize, |
986 | lqinode->i_size); | 986 | i_size_read(lqinode)); |
987 | if (status < 0) { | 987 | if (status < 0) { |
988 | mlog_errno(status); | 988 | mlog_errno(status); |
989 | goto out; | 989 | goto out; |
990 | } | 990 | } |
991 | status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh, | 991 | status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh, |
992 | lqinode->i_size + 2 * sb->s_blocksize); | 992 | i_size_read(lqinode) + 2 * sb->s_blocksize); |
993 | if (status < 0) { | 993 | if (status < 0) { |
994 | mlog_errno(status); | 994 | mlog_errno(status); |
995 | goto out; | 995 | goto out; |
@@ -1125,14 +1125,14 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( | |||
1125 | 1125 | ||
1126 | /* We are protected by dqio_sem so no locking needed */ | 1126 | /* We are protected by dqio_sem so no locking needed */ |
1127 | status = ocfs2_extend_no_holes(lqinode, NULL, | 1127 | status = ocfs2_extend_no_holes(lqinode, NULL, |
1128 | lqinode->i_size + sb->s_blocksize, | 1128 | i_size_read(lqinode) + sb->s_blocksize, |
1129 | lqinode->i_size); | 1129 | i_size_read(lqinode)); |
1130 | if (status < 0) { | 1130 | if (status < 0) { |
1131 | mlog_errno(status); | 1131 | mlog_errno(status); |
1132 | goto out; | 1132 | goto out; |
1133 | } | 1133 | } |
1134 | status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh, | 1134 | status = ocfs2_simple_size_update(lqinode, oinfo->dqi_lqi_bh, |
1135 | lqinode->i_size + sb->s_blocksize); | 1135 | i_size_read(lqinode) + sb->s_blocksize); |
1136 | if (status < 0) { | 1136 | if (status < 0) { |
1137 | mlog_errno(status); | 1137 | mlog_errno(status); |
1138 | goto out; | 1138 | goto out; |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index a70d604593b6..bf4dfc14bb2c 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -3854,7 +3854,10 @@ static int ocfs2_attach_refcount_tree(struct inode *inode, | |||
3854 | while (cpos < clusters) { | 3854 | while (cpos < clusters) { |
3855 | ret = ocfs2_get_clusters(inode, cpos, &p_cluster, | 3855 | ret = ocfs2_get_clusters(inode, cpos, &p_cluster, |
3856 | &num_clusters, &ext_flags); | 3856 | &num_clusters, &ext_flags); |
3857 | 3857 | if (ret) { | |
3858 | mlog_errno(ret); | ||
3859 | goto unlock; | ||
3860 | } | ||
3858 | if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) { | 3861 | if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) { |
3859 | ret = ocfs2_add_refcount_flag(inode, &di_et, | 3862 | ret = ocfs2_add_refcount_flag(inode, &di_et, |
3860 | &ref_tree->rf_ci, | 3863 | &ref_tree->rf_ci, |
@@ -4025,7 +4028,10 @@ static int ocfs2_duplicate_extent_list(struct inode *s_inode, | |||
4025 | while (cpos < clusters) { | 4028 | while (cpos < clusters) { |
4026 | ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster, | 4029 | ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster, |
4027 | &num_clusters, &ext_flags); | 4030 | &num_clusters, &ext_flags); |
4028 | 4031 | if (ret) { | |
4032 | mlog_errno(ret); | ||
4033 | goto out; | ||
4034 | } | ||
4029 | if (p_cluster) { | 4035 | if (p_cluster) { |
4030 | ret = ocfs2_add_refcounted_extent(t_inode, &et, | 4036 | ret = ocfs2_add_refcounted_extent(t_inode, &et, |
4031 | ref_ci, ref_root_bh, | 4037 | ref_ci, ref_root_bh, |
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 317ef0abccbb..6ce0686eab72 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -3505,7 +3505,7 @@ int ocfs2_xattr_set(struct inode *inode, | |||
3505 | int ret, credits, ref_meta = 0, ref_credits = 0; | 3505 | int ret, credits, ref_meta = 0, ref_credits = 0; |
3506 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 3506 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
3507 | struct inode *tl_inode = osb->osb_tl_inode; | 3507 | struct inode *tl_inode = osb->osb_tl_inode; |
3508 | struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, }; | 3508 | struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; |
3509 | struct ocfs2_refcount_tree *ref_tree = NULL; | 3509 | struct ocfs2_refcount_tree *ref_tree = NULL; |
3510 | 3510 | ||
3511 | struct ocfs2_xattr_info xi = { | 3511 | struct ocfs2_xattr_info xi = { |
@@ -3609,13 +3609,14 @@ int ocfs2_xattr_set(struct inode *inode, | |||
3609 | if (IS_ERR(ctxt.handle)) { | 3609 | if (IS_ERR(ctxt.handle)) { |
3610 | ret = PTR_ERR(ctxt.handle); | 3610 | ret = PTR_ERR(ctxt.handle); |
3611 | mlog_errno(ret); | 3611 | mlog_errno(ret); |
3612 | goto cleanup; | 3612 | goto out_free_ac; |
3613 | } | 3613 | } |
3614 | 3614 | ||
3615 | ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); | 3615 | ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); |
3616 | 3616 | ||
3617 | ocfs2_commit_trans(osb, ctxt.handle); | 3617 | ocfs2_commit_trans(osb, ctxt.handle); |
3618 | 3618 | ||
3619 | out_free_ac: | ||
3619 | if (ctxt.data_ac) | 3620 | if (ctxt.data_ac) |
3620 | ocfs2_free_alloc_context(ctxt.data_ac); | 3621 | ocfs2_free_alloc_context(ctxt.data_ac); |
3621 | if (ctxt.meta_ac) | 3622 | if (ctxt.meta_ac) |
@@ -5881,6 +5882,10 @@ static int ocfs2_xattr_value_attach_refcount(struct inode *inode, | |||
5881 | while (cpos < clusters) { | 5882 | while (cpos < clusters) { |
5882 | ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, | 5883 | ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, |
5883 | &num_clusters, el, &ext_flags); | 5884 | &num_clusters, el, &ext_flags); |
5885 | if (ret) { | ||
5886 | mlog_errno(ret); | ||
5887 | break; | ||
5888 | } | ||
5884 | 5889 | ||
5885 | cpos += num_clusters; | 5890 | cpos += num_clusters; |
5886 | if ((ext_flags & OCFS2_EXT_REFCOUNTED)) | 5891 | if ((ext_flags & OCFS2_EXT_REFCOUNTED)) |
@@ -6797,7 +6802,7 @@ out: | |||
6797 | if (ret) { | 6802 | if (ret) { |
6798 | if (*meta_ac) { | 6803 | if (*meta_ac) { |
6799 | ocfs2_free_alloc_context(*meta_ac); | 6804 | ocfs2_free_alloc_context(*meta_ac); |
6800 | meta_ac = NULL; | 6805 | *meta_ac = NULL; |
6801 | } | 6806 | } |
6802 | } | 6807 | } |
6803 | 6808 | ||
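Aside (illustration, not part of the patch): the final xattr.c hunk fixes an out-parameter bug. The function is handed struct ocfs2_alloc_context **meta_ac, and on the error path the old code cleared only the local pointer (meta_ac = NULL) after freeing, leaving the caller's copy dangling; the fix clears what the caller actually sees (*meta_ac = NULL). A stripped-down user-space illustration with invented names, not ocfs2 code:

/* Why '*out = NULL' matters after freeing through an out-parameter. */
#include <stdio.h>
#include <stdlib.h>

struct ctx { int dummy; };

static void cleanup_wrong(struct ctx **out)
{
	free(*out);
	out = NULL;     /* only the local copy changes; the caller still holds freed memory */
}

static void cleanup_right(struct ctx **out)
{
	free(*out);
	*out = NULL;    /* the caller's pointer is cleared, later NULL checks behave */
}

int main(void)
{
	struct ctx *ac = malloc(sizeof(*ac));

	cleanup_wrong(&ac);
	printf("after cleanup_wrong: ac is %s\n", ac ? "still set (dangling)" : "NULL");

	ac = malloc(sizeof(*ac));
	cleanup_right(&ac);
	printf("after cleanup_right: ac is %s\n", ac ? "still set (dangling)" : "NULL");
	return 0;
}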
diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 0ff80f9b930f..985ea881b5bc 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c | |||
@@ -286,7 +286,7 @@ int proc_fd_permission(struct inode *inode, int mask) | |||
286 | int rv = generic_permission(inode, mask); | 286 | int rv = generic_permission(inode, mask); |
287 | if (rv == 0) | 287 | if (rv == 0) |
288 | return 0; | 288 | return 0; |
289 | if (task_pid(current) == proc_pid(inode)) | 289 | if (task_tgid(current) == proc_pid(inode)) |
290 | rv = 0; | 290 | rv = 0; |
291 | return rv; | 291 | return rv; |
292 | } | 292 | } |
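Aside (illustration, not part of the patch): the proc_fd_permission() change compares the thread-group id rather than the per-thread pid, so any thread of a multi-threaded process can inspect its own /proc/<pid>/fd; with the old check only the thread whose pid equals the directory's pid (the group leader) passed. A small user-space reminder of the pid/tid distinction, Linux-only and using the raw SYS_gettid syscall:

/* Threads of one process share a tgid (getpid()) but have distinct tids. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *show_ids(void *arg)
{
	(void)arg;
	printf("pid (tgid) = %d, tid = %ld\n",
	       getpid(), (long)syscall(SYS_gettid));
	return NULL;
}

int main(void)
{
	pthread_t t;

	show_ids(NULL);                            /* main thread: tid == tgid */
	pthread_create(&t, NULL, show_ids, NULL);  /* second thread: same tgid, new tid */
	pthread_join(t, NULL);
	return 0;
}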
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 107d026f5d6e..7366e9d63cee 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -740,6 +740,9 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, | |||
740 | ptent = pte_file_clear_soft_dirty(ptent); | 740 | ptent = pte_file_clear_soft_dirty(ptent); |
741 | } | 741 | } |
742 | 742 | ||
743 | if (vma->vm_flags & VM_SOFTDIRTY) | ||
744 | vma->vm_flags &= ~VM_SOFTDIRTY; | ||
745 | |||
743 | set_pte_at(vma->vm_mm, addr, pte, ptent); | 746 | set_pte_at(vma->vm_mm, addr, pte, ptent); |
744 | #endif | 747 | #endif |
745 | } | 748 | } |
@@ -949,13 +952,15 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, | |||
949 | if (is_migration_entry(entry)) | 952 | if (is_migration_entry(entry)) |
950 | page = migration_entry_to_page(entry); | 953 | page = migration_entry_to_page(entry); |
951 | } else { | 954 | } else { |
952 | *pme = make_pme(PM_NOT_PRESENT(pm->v2)); | 955 | if (vma->vm_flags & VM_SOFTDIRTY) |
956 | flags2 |= __PM_SOFT_DIRTY; | ||
957 | *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2)); | ||
953 | return; | 958 | return; |
954 | } | 959 | } |
955 | 960 | ||
956 | if (page && !PageAnon(page)) | 961 | if (page && !PageAnon(page)) |
957 | flags |= PM_FILE; | 962 | flags |= PM_FILE; |
958 | if (pte_soft_dirty(pte)) | 963 | if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte)) |
959 | flags2 |= __PM_SOFT_DIRTY; | 964 | flags2 |= __PM_SOFT_DIRTY; |
960 | 965 | ||
961 | *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags); | 966 | *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags); |
@@ -974,7 +979,7 @@ static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *p | |||
974 | *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) | 979 | *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) |
975 | | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT); | 980 | | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT); |
976 | else | 981 | else |
977 | *pme = make_pme(PM_NOT_PRESENT(pm->v2)); | 982 | *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2)); |
978 | } | 983 | } |
979 | #else | 984 | #else |
980 | static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, | 985 | static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, |
@@ -997,7 +1002,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
997 | if (vma && pmd_trans_huge_lock(pmd, vma) == 1) { | 1002 | if (vma && pmd_trans_huge_lock(pmd, vma) == 1) { |
998 | int pmd_flags2; | 1003 | int pmd_flags2; |
999 | 1004 | ||
1000 | pmd_flags2 = (pmd_soft_dirty(*pmd) ? __PM_SOFT_DIRTY : 0); | 1005 | if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) |
1006 | pmd_flags2 = __PM_SOFT_DIRTY; | ||
1007 | else | ||
1008 | pmd_flags2 = 0; | ||
1009 | |||
1001 | for (; addr != end; addr += PAGE_SIZE) { | 1010 | for (; addr != end; addr += PAGE_SIZE) { |
1002 | unsigned long offset; | 1011 | unsigned long offset; |
1003 | 1012 | ||
@@ -1015,12 +1024,17 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
1015 | if (pmd_trans_unstable(pmd)) | 1024 | if (pmd_trans_unstable(pmd)) |
1016 | return 0; | 1025 | return 0; |
1017 | for (; addr != end; addr += PAGE_SIZE) { | 1026 | for (; addr != end; addr += PAGE_SIZE) { |
1027 | int flags2; | ||
1018 | 1028 | ||
1019 | /* check to see if we've left 'vma' behind | 1029 | /* check to see if we've left 'vma' behind |
1020 | * and need a new, higher one */ | 1030 | * and need a new, higher one */ |
1021 | if (vma && (addr >= vma->vm_end)) { | 1031 | if (vma && (addr >= vma->vm_end)) { |
1022 | vma = find_vma(walk->mm, addr); | 1032 | vma = find_vma(walk->mm, addr); |
1023 | pme = make_pme(PM_NOT_PRESENT(pm->v2)); | 1033 | if (vma && (vma->vm_flags & VM_SOFTDIRTY)) |
1034 | flags2 = __PM_SOFT_DIRTY; | ||
1035 | else | ||
1036 | flags2 = 0; | ||
1037 | pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2)); | ||
1024 | } | 1038 | } |
1025 | 1039 | ||
1026 | /* check that 'vma' actually covers this address, | 1040 | /* check that 'vma' actually covers this address, |
@@ -1044,13 +1058,15 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
1044 | 1058 | ||
1045 | #ifdef CONFIG_HUGETLB_PAGE | 1059 | #ifdef CONFIG_HUGETLB_PAGE |
1046 | static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, | 1060 | static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, |
1047 | pte_t pte, int offset) | 1061 | pte_t pte, int offset, int flags2) |
1048 | { | 1062 | { |
1049 | if (pte_present(pte)) | 1063 | if (pte_present(pte)) |
1050 | *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) | 1064 | *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) | |
1051 | | PM_STATUS2(pm->v2, 0) | PM_PRESENT); | 1065 | PM_STATUS2(pm->v2, flags2) | |
1066 | PM_PRESENT); | ||
1052 | else | 1067 | else |
1053 | *pme = make_pme(PM_NOT_PRESENT(pm->v2)); | 1068 | *pme = make_pme(PM_NOT_PRESENT(pm->v2) | |
1069 | PM_STATUS2(pm->v2, flags2)); | ||
1054 | } | 1070 | } |
1055 | 1071 | ||
1056 | /* This function walks within one hugetlb entry in the single call */ | 1072 | /* This function walks within one hugetlb entry in the single call */ |
@@ -1059,12 +1075,22 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, | |||
1059 | struct mm_walk *walk) | 1075 | struct mm_walk *walk) |
1060 | { | 1076 | { |
1061 | struct pagemapread *pm = walk->private; | 1077 | struct pagemapread *pm = walk->private; |
1078 | struct vm_area_struct *vma; | ||
1062 | int err = 0; | 1079 | int err = 0; |
1080 | int flags2; | ||
1063 | pagemap_entry_t pme; | 1081 | pagemap_entry_t pme; |
1064 | 1082 | ||
1083 | vma = find_vma(walk->mm, addr); | ||
1084 | WARN_ON_ONCE(!vma); | ||
1085 | |||
1086 | if (vma && (vma->vm_flags & VM_SOFTDIRTY)) | ||
1087 | flags2 = __PM_SOFT_DIRTY; | ||
1088 | else | ||
1089 | flags2 = 0; | ||
1090 | |||
1065 | for (; addr != end; addr += PAGE_SIZE) { | 1091 | for (; addr != end; addr += PAGE_SIZE) { |
1066 | int offset = (addr & ~hmask) >> PAGE_SHIFT; | 1092 | int offset = (addr & ~hmask) >> PAGE_SHIFT; |
1067 | huge_pte_to_pagemap_entry(&pme, pm, *pte, offset); | 1093 | huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2); |
1068 | err = add_to_pagemap(addr, &pme, pm); | 1094 | err = add_to_pagemap(addr, &pme, pm); |
1069 | if (err) | 1095 | if (err) |
1070 | return err; | 1096 | return err; |
@@ -1376,8 +1402,10 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) | |||
1376 | walk.mm = mm; | 1402 | walk.mm = mm; |
1377 | 1403 | ||
1378 | pol = get_vma_policy(task, vma, vma->vm_start); | 1404 | pol = get_vma_policy(task, vma, vma->vm_start); |
1379 | mpol_to_str(buffer, sizeof(buffer), pol); | 1405 | n = mpol_to_str(buffer, sizeof(buffer), pol); |
1380 | mpol_cond_put(pol); | 1406 | mpol_cond_put(pol); |
1407 | if (n < 0) | ||
1408 | return n; | ||
1381 | 1409 | ||
1382 | seq_printf(m, "%08lx %s", vma->vm_start, buffer); | 1410 | seq_printf(m, "%08lx %s", vma->vm_start, buffer); |
1383 | 1411 | ||
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index a1a16eb97c7b..9100d6959886 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/crash_dump.h> | 21 | #include <linux/crash_dump.h> |
22 | #include <linux/list.h> | 22 | #include <linux/list.h> |
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | #include <linux/pagemap.h> | ||
24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
26 | #include "internal.h" | 27 | #include "internal.h" |
@@ -123,11 +124,65 @@ static ssize_t read_from_oldmem(char *buf, size_t count, | |||
123 | return read; | 124 | return read; |
124 | } | 125 | } |
125 | 126 | ||
127 | /* | ||
128 | * Architectures may override this function to allocate ELF header in 2nd kernel | ||
129 | */ | ||
130 | int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) | ||
131 | { | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Architectures may override this function to free header | ||
137 | */ | ||
138 | void __weak elfcorehdr_free(unsigned long long addr) | ||
139 | {} | ||
140 | |||
141 | /* | ||
142 | * Architectures may override this function to read from ELF header | ||
143 | */ | ||
144 | ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos) | ||
145 | { | ||
146 | return read_from_oldmem(buf, count, ppos, 0); | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * Architectures may override this function to read from notes sections | ||
151 | */ | ||
152 | ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos) | ||
153 | { | ||
154 | return read_from_oldmem(buf, count, ppos, 0); | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Architectures may override this function to map oldmem | ||
159 | */ | ||
160 | int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, | ||
161 | unsigned long from, unsigned long pfn, | ||
162 | unsigned long size, pgprot_t prot) | ||
163 | { | ||
164 | return remap_pfn_range(vma, from, pfn, size, prot); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Copy to either kernel or user space | ||
169 | */ | ||
170 | static int copy_to(void *target, void *src, size_t size, int userbuf) | ||
171 | { | ||
172 | if (userbuf) { | ||
173 | if (copy_to_user((char __user *) target, src, size)) | ||
174 | return -EFAULT; | ||
175 | } else { | ||
176 | memcpy(target, src, size); | ||
177 | } | ||
178 | return 0; | ||
179 | } | ||
180 | |||
126 | /* Read from the ELF header and then the crash dump. On error, negative value is | 181 | /* Read from the ELF header and then the crash dump. On error, negative value is |
127 | * returned otherwise number of bytes read are returned. | 182 | * returned otherwise number of bytes read are returned. |
128 | */ | 183 | */ |
129 | static ssize_t read_vmcore(struct file *file, char __user *buffer, | 184 | static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos, |
130 | size_t buflen, loff_t *fpos) | 185 | int userbuf) |
131 | { | 186 | { |
132 | ssize_t acc = 0, tmp; | 187 | ssize_t acc = 0, tmp; |
133 | size_t tsz; | 188 | size_t tsz; |
@@ -144,7 +199,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
144 | /* Read ELF core header */ | 199 | /* Read ELF core header */ |
145 | if (*fpos < elfcorebuf_sz) { | 200 | if (*fpos < elfcorebuf_sz) { |
146 | tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen); | 201 | tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen); |
147 | if (copy_to_user(buffer, elfcorebuf + *fpos, tsz)) | 202 | if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf)) |
148 | return -EFAULT; | 203 | return -EFAULT; |
149 | buflen -= tsz; | 204 | buflen -= tsz; |
150 | *fpos += tsz; | 205 | *fpos += tsz; |
@@ -162,7 +217,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
162 | 217 | ||
163 | tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen); | 218 | tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen); |
164 | kaddr = elfnotes_buf + *fpos - elfcorebuf_sz; | 219 | kaddr = elfnotes_buf + *fpos - elfcorebuf_sz; |
165 | if (copy_to_user(buffer, kaddr, tsz)) | 220 | if (copy_to(buffer, kaddr, tsz, userbuf)) |
166 | return -EFAULT; | 221 | return -EFAULT; |
167 | buflen -= tsz; | 222 | buflen -= tsz; |
168 | *fpos += tsz; | 223 | *fpos += tsz; |
@@ -178,7 +233,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
178 | if (*fpos < m->offset + m->size) { | 233 | if (*fpos < m->offset + m->size) { |
179 | tsz = min_t(size_t, m->offset + m->size - *fpos, buflen); | 234 | tsz = min_t(size_t, m->offset + m->size - *fpos, buflen); |
180 | start = m->paddr + *fpos - m->offset; | 235 | start = m->paddr + *fpos - m->offset; |
181 | tmp = read_from_oldmem(buffer, tsz, &start, 1); | 236 | tmp = read_from_oldmem(buffer, tsz, &start, userbuf); |
182 | if (tmp < 0) | 237 | if (tmp < 0) |
183 | return tmp; | 238 | return tmp; |
184 | buflen -= tsz; | 239 | buflen -= tsz; |
@@ -195,6 +250,55 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, | |||
195 | return acc; | 250 | return acc; |
196 | } | 251 | } |
197 | 252 | ||
253 | static ssize_t read_vmcore(struct file *file, char __user *buffer, | ||
254 | size_t buflen, loff_t *fpos) | ||
255 | { | ||
256 | return __read_vmcore((__force char *) buffer, buflen, fpos, 1); | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * The vmcore fault handler uses the page cache and fills data using the | ||
261 | * standard __vmcore_read() function. | ||
262 | * | ||
263 | * On s390 the fault handler is used for memory regions that can't be mapped | ||
264 | * directly with remap_pfn_range(). | ||
265 | */ | ||
266 | static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
267 | { | ||
268 | #ifdef CONFIG_S390 | ||
269 | struct address_space *mapping = vma->vm_file->f_mapping; | ||
270 | pgoff_t index = vmf->pgoff; | ||
271 | struct page *page; | ||
272 | loff_t offset; | ||
273 | char *buf; | ||
274 | int rc; | ||
275 | |||
276 | page = find_or_create_page(mapping, index, GFP_KERNEL); | ||
277 | if (!page) | ||
278 | return VM_FAULT_OOM; | ||
279 | if (!PageUptodate(page)) { | ||
280 | offset = (loff_t) index << PAGE_CACHE_SHIFT; | ||
281 | buf = __va((page_to_pfn(page) << PAGE_SHIFT)); | ||
282 | rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0); | ||
283 | if (rc < 0) { | ||
284 | unlock_page(page); | ||
285 | page_cache_release(page); | ||
286 | return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; | ||
287 | } | ||
288 | SetPageUptodate(page); | ||
289 | } | ||
290 | unlock_page(page); | ||
291 | vmf->page = page; | ||
292 | return 0; | ||
293 | #else | ||
294 | return VM_FAULT_SIGBUS; | ||
295 | #endif | ||
296 | } | ||
297 | |||
298 | static const struct vm_operations_struct vmcore_mmap_ops = { | ||
299 | .fault = mmap_vmcore_fault, | ||
300 | }; | ||
301 | |||
198 | /** | 302 | /** |
199 | * alloc_elfnotes_buf - allocate buffer for ELF note segment in | 303 | * alloc_elfnotes_buf - allocate buffer for ELF note segment in |
200 | * vmalloc memory | 304 | * vmalloc memory |
@@ -223,7 +327,7 @@ static inline char *alloc_elfnotes_buf(size_t notes_sz) | |||
223 | * regions in the 1st kernel pointed to by PT_LOAD entries) into | 327 | * regions in the 1st kernel pointed to by PT_LOAD entries) into |
224 | * virtually contiguous user-space in ELF layout. | 328 | * virtually contiguous user-space in ELF layout. |
225 | */ | 329 | */ |
226 | #if defined(CONFIG_MMU) && !defined(CONFIG_S390) | 330 | #ifdef CONFIG_MMU |
227 | static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) | 331 | static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) |
228 | { | 332 | { |
229 | size_t size = vma->vm_end - vma->vm_start; | 333 | size_t size = vma->vm_end - vma->vm_start; |
@@ -241,6 +345,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) | |||
241 | 345 | ||
242 | vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); | 346 | vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); |
243 | vma->vm_flags |= VM_MIXEDMAP; | 347 | vma->vm_flags |= VM_MIXEDMAP; |
348 | vma->vm_ops = &vmcore_mmap_ops; | ||
244 | 349 | ||
245 | len = 0; | 350 | len = 0; |
246 | 351 | ||
@@ -282,9 +387,9 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) | |||
282 | 387 | ||
283 | tsz = min_t(size_t, m->offset + m->size - start, size); | 388 | tsz = min_t(size_t, m->offset + m->size - start, size); |
284 | paddr = m->paddr + start - m->offset; | 389 | paddr = m->paddr + start - m->offset; |
285 | if (remap_pfn_range(vma, vma->vm_start + len, | 390 | if (remap_oldmem_pfn_range(vma, vma->vm_start + len, |
286 | paddr >> PAGE_SHIFT, tsz, | 391 | paddr >> PAGE_SHIFT, tsz, |
287 | vma->vm_page_prot)) | 392 | vma->vm_page_prot)) |
288 | goto fail; | 393 | goto fail; |
289 | size -= tsz; | 394 | size -= tsz; |
290 | start += tsz; | 395 | start += tsz; |
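Switching this call from remap_pfn_range() to the new remap_oldmem_pfn_range() hook lets an architecture decide per range whether a direct mapping is possible; anything it declines to map is served later by mmap_vmcore_fault(). A rough sketch of such an override follows, with arch_oldmem_mappable() as a purely hypothetical helper.

/* Hypothetical override (not from this patch): map only ranges that are
 * directly accessible and leave the rest unpopulated, so accesses fault
 * into mmap_vmcore_fault() and are filled from __read_vmcore().
 */
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long pfn, unsigned long size,
			   pgprot_t prot)
{
	if (!arch_oldmem_mappable(pfn, size))	/* assumed helper */
		return 0;
	return remap_pfn_range(vma, from, pfn, size, prot);
}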
@@ -357,7 +462,7 @@ static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr) | |||
357 | notes_section = kmalloc(max_sz, GFP_KERNEL); | 462 | notes_section = kmalloc(max_sz, GFP_KERNEL); |
358 | if (!notes_section) | 463 | if (!notes_section) |
359 | return -ENOMEM; | 464 | return -ENOMEM; |
360 | rc = read_from_oldmem(notes_section, max_sz, &offset, 0); | 465 | rc = elfcorehdr_read_notes(notes_section, max_sz, &offset); |
361 | if (rc < 0) { | 466 | if (rc < 0) { |
362 | kfree(notes_section); | 467 | kfree(notes_section); |
363 | return rc; | 468 | return rc; |
@@ -444,7 +549,8 @@ static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf) | |||
444 | if (phdr_ptr->p_type != PT_NOTE) | 549 | if (phdr_ptr->p_type != PT_NOTE) |
445 | continue; | 550 | continue; |
446 | offset = phdr_ptr->p_offset; | 551 | offset = phdr_ptr->p_offset; |
447 | rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0); | 552 | rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, |
553 | &offset); | ||
448 | if (rc < 0) | 554 | if (rc < 0) |
449 | return rc; | 555 | return rc; |
450 | notes_buf += phdr_ptr->p_memsz; | 556 | notes_buf += phdr_ptr->p_memsz; |
@@ -536,7 +642,7 @@ static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr) | |||
536 | notes_section = kmalloc(max_sz, GFP_KERNEL); | 642 | notes_section = kmalloc(max_sz, GFP_KERNEL); |
537 | if (!notes_section) | 643 | if (!notes_section) |
538 | return -ENOMEM; | 644 | return -ENOMEM; |
539 | rc = read_from_oldmem(notes_section, max_sz, &offset, 0); | 645 | rc = elfcorehdr_read_notes(notes_section, max_sz, &offset); |
540 | if (rc < 0) { | 646 | if (rc < 0) { |
541 | kfree(notes_section); | 647 | kfree(notes_section); |
542 | return rc; | 648 | return rc; |
@@ -623,7 +729,8 @@ static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf) | |||
623 | if (phdr_ptr->p_type != PT_NOTE) | 729 | if (phdr_ptr->p_type != PT_NOTE) |
624 | continue; | 730 | continue; |
625 | offset = phdr_ptr->p_offset; | 731 | offset = phdr_ptr->p_offset; |
626 | rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0); | 732 | rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, |
733 | &offset); | ||
627 | if (rc < 0) | 734 | if (rc < 0) |
628 | return rc; | 735 | return rc; |
629 | notes_buf += phdr_ptr->p_memsz; | 736 | notes_buf += phdr_ptr->p_memsz; |
@@ -810,7 +917,7 @@ static int __init parse_crash_elf64_headers(void) | |||
810 | addr = elfcorehdr_addr; | 917 | addr = elfcorehdr_addr; |
811 | 918 | ||
812 | /* Read Elf header */ | 919 | /* Read Elf header */ |
813 | rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0); | 920 | rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr); |
814 | if (rc < 0) | 921 | if (rc < 0) |
815 | return rc; | 922 | return rc; |
816 | 923 | ||
@@ -837,7 +944,7 @@ static int __init parse_crash_elf64_headers(void) | |||
837 | if (!elfcorebuf) | 944 | if (!elfcorebuf) |
838 | return -ENOMEM; | 945 | return -ENOMEM; |
839 | addr = elfcorehdr_addr; | 946 | addr = elfcorehdr_addr; |
840 | rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0); | 947 | rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr); |
841 | if (rc < 0) | 948 | if (rc < 0) |
842 | goto fail; | 949 | goto fail; |
843 | 950 | ||
@@ -866,7 +973,7 @@ static int __init parse_crash_elf32_headers(void) | |||
866 | addr = elfcorehdr_addr; | 973 | addr = elfcorehdr_addr; |
867 | 974 | ||
868 | /* Read Elf header */ | 975 | /* Read Elf header */ |
869 | rc = read_from_oldmem((char*)&ehdr, sizeof(Elf32_Ehdr), &addr, 0); | 976 | rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr); |
870 | if (rc < 0) | 977 | if (rc < 0) |
871 | return rc; | 978 | return rc; |
872 | 979 | ||
@@ -892,7 +999,7 @@ static int __init parse_crash_elf32_headers(void) | |||
892 | if (!elfcorebuf) | 999 | if (!elfcorebuf) |
893 | return -ENOMEM; | 1000 | return -ENOMEM; |
894 | addr = elfcorehdr_addr; | 1001 | addr = elfcorehdr_addr; |
895 | rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0); | 1002 | rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr); |
896 | if (rc < 0) | 1003 | if (rc < 0) |
897 | goto fail; | 1004 | goto fail; |
898 | 1005 | ||
@@ -919,7 +1026,7 @@ static int __init parse_crash_elf_headers(void) | |||
919 | int rc=0; | 1026 | int rc=0; |
920 | 1027 | ||
921 | addr = elfcorehdr_addr; | 1028 | addr = elfcorehdr_addr; |
922 | rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0); | 1029 | rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr); |
923 | if (rc < 0) | 1030 | if (rc < 0) |
924 | return rc; | 1031 | return rc; |
925 | if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) { | 1032 | if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) { |
@@ -952,7 +1059,14 @@ static int __init vmcore_init(void) | |||
952 | { | 1059 | { |
953 | int rc = 0; | 1060 | int rc = 0; |
954 | 1061 | ||
955 | /* If elfcorehdr= has been passed in cmdline, then capture the dump.*/ | 1062 | /* Allow architectures to allocate ELF header in 2nd kernel */ |
1063 | rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size); | ||
1064 | if (rc) | ||
1065 | return rc; | ||
1066 | /* | ||
1067 | * If elfcorehdr= has been passed in cmdline or created in 2nd kernel, | ||
1068 | * then capture the dump. | ||
1069 | */ | ||
956 | if (!(is_vmcore_usable())) | 1070 | if (!(is_vmcore_usable())) |
957 | return rc; | 1071 | return rc; |
958 | rc = parse_crash_elf_headers(); | 1072 | rc = parse_crash_elf_headers(); |
@@ -960,6 +1074,8 @@ static int __init vmcore_init(void) | |||
960 | pr_warn("Kdump: vmcore not initialized\n"); | 1074 | pr_warn("Kdump: vmcore not initialized\n"); |
961 | return rc; | 1075 | return rc; |
962 | } | 1076 | } |
1077 | elfcorehdr_free(elfcorehdr_addr); | ||
1078 | elfcorehdr_addr = ELFCORE_ADDR_ERR; | ||
963 | 1079 | ||
964 | proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations); | 1080 | proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations); |
965 | if (proc_vmcore) | 1081 | if (proc_vmcore) |
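vmcore_init() now brackets the header parsing with the new hooks: elfcorehdr_alloc() may hand back a header the 2nd kernel built itself, and elfcorehdr_free() releases it once the in-kernel copies exist. One possible arch-side pairing is sketched below with assumed names (ARCH_ELFCOREHDR_SIZE, arch_fill_crash_elf_header()); it is not taken from this patch.

/* Sketch only: allocate and fill a crash ELF header in the 2nd kernel and
 * report its kernel virtual address; vmcore_init() frees it again via
 * elfcorehdr_free() after parsing.
 */
int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	void *hdr = kzalloc(ARCH_ELFCOREHDR_SIZE, GFP_KERNEL);	/* assumed size */

	if (!hdr)
		return -ENOMEM;
	*size = arch_fill_crash_elf_header(hdr);	/* assumed helper */
	*addr = (unsigned long long)(unsigned long)hdr;
	return 0;
}

void elfcorehdr_free(unsigned long long addr)
{
	kfree((void *)(unsigned long)addr);
}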
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index c24f1e10b946..39d14659a8d3 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c | |||
@@ -244,12 +244,6 @@ struct dentry *ramfs_mount(struct file_system_type *fs_type, | |||
244 | return mount_nodev(fs_type, flags, data, ramfs_fill_super); | 244 | return mount_nodev(fs_type, flags, data, ramfs_fill_super); |
245 | } | 245 | } |
246 | 246 | ||
247 | static struct dentry *rootfs_mount(struct file_system_type *fs_type, | ||
248 | int flags, const char *dev_name, void *data) | ||
249 | { | ||
250 | return mount_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super); | ||
251 | } | ||
252 | |||
253 | static void ramfs_kill_sb(struct super_block *sb) | 247 | static void ramfs_kill_sb(struct super_block *sb) |
254 | { | 248 | { |
255 | kfree(sb->s_fs_info); | 249 | kfree(sb->s_fs_info); |
@@ -262,29 +256,23 @@ static struct file_system_type ramfs_fs_type = { | |||
262 | .kill_sb = ramfs_kill_sb, | 256 | .kill_sb = ramfs_kill_sb, |
263 | .fs_flags = FS_USERNS_MOUNT, | 257 | .fs_flags = FS_USERNS_MOUNT, |
264 | }; | 258 | }; |
265 | static struct file_system_type rootfs_fs_type = { | ||
266 | .name = "rootfs", | ||
267 | .mount = rootfs_mount, | ||
268 | .kill_sb = kill_litter_super, | ||
269 | }; | ||
270 | 259 | ||
271 | static int __init init_ramfs_fs(void) | 260 | int __init init_ramfs_fs(void) |
272 | { | ||
273 | return register_filesystem(&ramfs_fs_type); | ||
274 | } | ||
275 | module_init(init_ramfs_fs) | ||
276 | |||
277 | int __init init_rootfs(void) | ||
278 | { | 261 | { |
262 | static unsigned long once; | ||
279 | int err; | 263 | int err; |
280 | 264 | ||
265 | if (test_and_set_bit(0, &once)) | ||
266 | return 0; | ||
267 | |||
281 | err = bdi_init(&ramfs_backing_dev_info); | 268 | err = bdi_init(&ramfs_backing_dev_info); |
282 | if (err) | 269 | if (err) |
283 | return err; | 270 | return err; |
284 | 271 | ||
285 | err = register_filesystem(&rootfs_fs_type); | 272 | err = register_filesystem(&ramfs_fs_type); |
286 | if (err) | 273 | if (err) |
287 | bdi_destroy(&ramfs_backing_dev_info); | 274 | bdi_destroy(&ramfs_backing_dev_info); |
288 | 275 | ||
289 | return err; | 276 | return err; |
290 | } | 277 | } |
278 | module_init(init_ramfs_fs) | ||
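With rootfs_mount() and rootfs_fs_type gone from this file, init_ramfs_fs() is no longer static and can be reached more than once, so the static 'once' bit turns any repeated call into a no-op. The guard pattern, shown standalone as a sketch:

/* test_and_set_bit() returns the previous bit value, so only the first
 * caller falls through and does the one-time work.
 */
static unsigned long example_once;

int example_init(void)
{
	if (test_and_set_bit(0, &example_once))
		return 0;	/* already initialised */
	/* ... one-time registration work ... */
	return 0;
}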