author	Ingo Molnar <mingo@kernel.org>	2014-07-28 04:03:00 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-07-28 04:03:00 -0400
commit	ca5bc6cd5de5b53eb8fd6fea39aa3fe2a1e8c3d9 (patch)
tree	75beaae2d4b6bc654eb28994dd5906d8dcf5ef46 /fs/gfs2
parent	c1221321b7c25b53204447cff9949a6d5a7ddddc (diff)
parent	d8d28c8f00e84a72e8bee39a85835635417bee49 (diff)
Merge branch 'sched/urgent' into sched/core, to merge fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/file.c     |  4
-rw-r--r--  fs/gfs2/glock.c    | 14
-rw-r--r--  fs/gfs2/glops.c    |  4
-rw-r--r--  fs2/gfs2/lock_dlm.c |  4
-rw-r--r--  fs/gfs2/rgrp.c     |  4
5 files changed, 17 insertions, 13 deletions
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a3046174..26b3f952e6b1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 	int error = 0;
 
 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
 
 	mutex_lock(&fp->f_fl_mutex);
 
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 			goto out;
 		flock_lock_file_wait(file,
 				     &(struct file_lock){.fl_type = F_UNLCK});
-		gfs2_glock_dq_wait(fl_gh);
+		gfs2_glock_dq(fl_gh);
 		gfs2_holder_reinit(state, flags, fl_gh);
 	} else {
 		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 770e16716d81..7f513b1ceb2c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		cachep = gfs2_glock_aspace_cachep;
 	else
 		cachep = gfs2_glock_cachep;
-	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+	gl = kmem_cache_alloc(cachep, GFP_NOFS);
 	if (!gl)
 		return -ENOMEM;
 
 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 
 	if (glops->go_flags & GLOF_LVB) {
-		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
 		if (!gl->gl_lksb.sb_lvbptr) {
 			kmem_cache_free(cachep, gl);
 			return -ENOMEM;
@@ -1383,12 +1383,16 @@ __acquires(&lru_lock)
 		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
 		list_del_init(&gl->gl_lru);
 		if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
 			list_add(&gl->gl_lru, &lru_list);
 			atomic_inc(&lru_count);
 			continue;
 		}
+		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+			spin_unlock(&gl->gl_spin);
+			goto add_back_to_lru;
+		}
 		clear_bit(GLF_LRU, &gl->gl_flags);
-		spin_unlock(&lru_lock);
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1396,7 +1400,7 @@ __acquires(&lru_lock)
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gl->gl_lockref.count--;
 		spin_unlock(&gl->gl_spin);
-		spin_lock(&lru_lock);
+		cond_resched_lock(&lru_lock);
 	}
 }
 
@@ -1421,7 +1425,7 @@ static long gfs2_scan_glock_lru(int nr)
 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
 
 		/* Test for being demotable */
-		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
 			list_move(&gl->gl_lru, &dispose);
 			atomic_dec(&lru_count);
 			freed++;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc1100781bbc..2ffc67dce87f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
  *
- * Normally we invlidate everything, but if we are moving into
+ * Normally we invalidate everything, but if we are moving into
  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
  * can keep hold of the metadata, since it won't have changed.
  *
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 992ca5b1e045..641383a9c1bb 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1030,8 +1030,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 
 	new_size = old_size + RECOVER_SIZE_INC;
 
-	submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
-	result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
+	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
+	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
 	if (!submit || !result) {
 		kfree(submit);
 		kfree(result);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1bd1bd..f4cb9c0d6bbd 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
 
 /**
  * gfs2_free_extlen - Return extent length of free blocks
- * @rbm: Starting position
+ * @rrbm: Starting position
  * @len: Max length to check
  *
  * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 
 /**
  * gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
  *
  */
 