author    | James Morris <jmorris@namei.org> | 2009-02-05 19:01:45 -0500
committer | James Morris <jmorris@namei.org> | 2009-02-05 19:01:45 -0500
commit    | cb5629b10d64a8006622ce3a52bc887d91057d69 (patch)
tree      | 7c06d8f30783115e3384721046258ce615b129c5 /fs/dlm/lock.c
parent    | 8920d5ad6ba74ae8ab020e90cc4d976980e68701 (diff)
parent    | f01d1d546abb2f4028b5299092f529eefb01253a (diff)
Merge branch 'master' into next

Conflicts:
        fs/namei.c

Manually merged per:

diff --cc fs/namei.c
index 734f2b5,bbc15c2..0000000
--- a/fs/namei.c
+++ b/fs/namei.c
@@@ -860,9 -848,8 +849,10 @@@ static int __link_path_walk(const char
                  nd->flags |= LOOKUP_CONTINUE;
                  err = exec_permission_lite(inode);
                  if (err == -EAGAIN)
-                         err = vfs_permission(nd, MAY_EXEC);
+                         err = inode_permission(nd->path.dentry->d_inode,
+                                                MAY_EXEC);
 +                if (!err)
 +                        err = ima_path_check(&nd->path, MAY_EXEC);
                  if (err)
                          break;
  
@@@ -1525,14 -1506,9 +1509,14 @@@ int may_open(struct path *path, int acc
                          flag &= ~O_TRUNC;
          }
  
-         error = vfs_permission(nd, acc_mode);
+         error = inode_permission(inode, acc_mode);
          if (error)
                  return error;
 +
-         error = ima_path_check(&nd->path,
++        error = ima_path_check(path,
 +                               acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
 +        if (error)
 +                return error;
          /*
           * An append-only file must be opened in append mode for writing.
           */

Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'fs/dlm/lock.c')
-rw-r--r-- | fs/dlm/lock.c | 57
1 file changed, 29 insertions, 28 deletions
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 724ddac91538..01e7d39c5fba 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -307,7 +307,7 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
 	lkb->lkb_lksb->sb_status = rv;
 	lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
 
-	dlm_add_ast(lkb, AST_COMP);
+	dlm_add_ast(lkb, AST_COMP, 0);
 }
 
 static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -318,12 +318,12 @@ static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
 
 static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
 {
+	lkb->lkb_time_bast = ktime_get();
+
 	if (is_master_copy(lkb))
 		send_bast(r, lkb, rqmode);
-	else {
-		lkb->lkb_bastmode = rqmode;
-		dlm_add_ast(lkb, AST_BAST);
-	}
+	else
+		dlm_add_ast(lkb, AST_BAST, rqmode);
 }
 
 /*
@@ -412,9 +412,9 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
 		      unsigned int flags, struct dlm_rsb **r_ret)
 {
 	int error;
-	write_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock(&ls->ls_rsbtbl[b].lock);
 	error = _search_rsb(ls, name, len, b, flags, r_ret);
-	write_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock(&ls->ls_rsbtbl[b].lock);
 	return error;
 }
 
@@ -478,16 +478,16 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
 		r->res_nodeid = nodeid;
 	}
 
-	write_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
 	if (!error) {
-		write_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 		dlm_free_rsb(r);
 		r = tmp;
 		goto out;
 	}
 	list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
-	write_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	error = 0;
  out:
 	*r_ret = r;
@@ -530,9 +530,9 @@ static void put_rsb(struct dlm_rsb *r)
 	struct dlm_ls *ls = r->res_ls;
 	uint32_t bucket = r->res_bucket;
 
-	write_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	kref_put(&r->res_ref, toss_rsb);
-	write_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 }
 
 void dlm_put_rsb(struct dlm_rsb *r)
@@ -744,6 +744,8 @@ static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
 
 	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
 
+	lkb->lkb_timestamp = ktime_get();
+
 	lkb->lkb_status = status;
 
 	switch (status) {
@@ -965,7 +967,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
 
 	for (;;) {
 		found = 0;
-		write_lock(&ls->ls_rsbtbl[b].lock);
+		spin_lock(&ls->ls_rsbtbl[b].lock);
 		list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
 					    res_hashchain) {
 			if (!time_after_eq(jiffies, r->res_toss_time +
@@ -976,20 +978,20 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
 		}
 
 		if (!found) {
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 			break;
 		}
 
 		if (kref_put(&r->res_ref, kill_rsb)) {
 			list_del(&r->res_hashchain);
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 
 			if (is_master(r))
 				dir_remove(r);
 			dlm_free_rsb(r);
 			count++;
 		} else {
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 			log_error(ls, "tossed rsb in use %s", r->res_name);
 		}
 	}
@@ -1013,10 +1015,8 @@ static void add_timeout(struct dlm_lkb *lkb)
 {
 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
 
-	if (is_master_copy(lkb)) {
-		lkb->lkb_timestamp = jiffies;
+	if (is_master_copy(lkb))
 		return;
-	}
 
 	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
 	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
@@ -1031,7 +1031,6 @@ static void add_timeout(struct dlm_lkb *lkb)
 	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
 	mutex_lock(&ls->ls_timeout_mutex);
 	hold_lkb(lkb);
-	lkb->lkb_timestamp = jiffies;
 	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
 	mutex_unlock(&ls->ls_timeout_mutex);
 }
@@ -1059,6 +1058,7 @@ void dlm_scan_timeout(struct dlm_ls *ls)
 	struct dlm_rsb *r;
 	struct dlm_lkb *lkb;
 	int do_cancel, do_warn;
+	s64 wait_us;
 
 	for (;;) {
 		if (dlm_locking_stopped(ls))
@@ -1069,14 +1069,15 @@ void dlm_scan_timeout(struct dlm_ls *ls)
 		mutex_lock(&ls->ls_timeout_mutex);
 		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
 
+			wait_us = ktime_to_us(ktime_sub(ktime_get(),
+							lkb->lkb_timestamp));
+
 			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
-			    time_after_eq(jiffies, lkb->lkb_timestamp +
-					  lkb->lkb_timeout_cs * HZ/100))
+			    wait_us >= (lkb->lkb_timeout_cs * 10000))
 				do_cancel = 1;
 
 			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
-			    time_after_eq(jiffies, lkb->lkb_timestamp +
-					  dlm_config.ci_timewarn_cs * HZ/100))
+			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
 				do_warn = 1;
 
 			if (!do_cancel && !do_warn)
@@ -1122,12 +1123,12 @@ void dlm_scan_timeout(struct dlm_ls *ls)
 void dlm_adjust_timeouts(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb;
-	long adj = jiffies - ls->ls_recover_begin;
+	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
 
 	ls->ls_recover_begin = 0;
 	mutex_lock(&ls->ls_timeout_mutex);
 	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
-		lkb->lkb_timestamp += adj;
+		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
 	mutex_unlock(&ls->ls_timeout_mutex);
 }
 
@@ -4223,7 +4224,7 @@ static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
 {
 	struct dlm_rsb *r, *r_ret = NULL;
 
-	read_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
 		if (!rsb_flag(r, RSB_LOCKS_PURGED))
 			continue;
@@ -4232,7 +4233,7 @@ static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
 		r_ret = r;
 		break;
 	}
-	read_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	return r_ret;
 }
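
Editor's note (not part of the commit): the dlm_scan_timeout() hunks above replace jiffies arithmetic with a ktime-derived elapsed time in microseconds, compared against limits configured in centiseconds and scaled by 10000 (1 cs = 10000 us). The following is a minimal userspace sketch of that comparison only; it is not kernel code, CLOCK_MONOTONIC merely stands in for ktime_get(), and the names timestamp_us and timewarn_cs are illustrative echoes of lkb_timestamp and dlm_config.ci_timewarn_cs.

/*
 * Sketch of the centisecond-to-microsecond timeout check used in the
 * converted dlm_scan_timeout() hunks, done with userspace clocks.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int64_t monotonic_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
        int64_t timestamp_us = monotonic_us(); /* stamp taken when the lock was queued */
        int timewarn_cs = 1;                   /* warn after 1 centisecond, i.e. 10 ms */
        int64_t wait_us;

        usleep(20000);                         /* pretend the lock has been waiting 20 ms */

        wait_us = monotonic_us() - timestamp_us;
        if (wait_us >= (int64_t)timewarn_cs * 10000)
                printf("do_warn: waited %lld us, limit %d cs\n",
                       (long long)wait_us, timewarn_cs);
        return 0;
}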