Diffstat (limited to 'fs')
-rw-r--r--	fs/gfs2/glock.c | 38
1 files changed, 7 insertions, 31 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 5b772bb0210f..1509481b8ca6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -211,30 +211,6 @@ out:
 }
 
 /**
- * queue_empty - check to see if a glock's queue is empty
- * @gl: the glock
- * @head: the head of the queue to check
- *
- * This function protects the list in the event that a process already
- * has a holder on the list and is adding a second holder for itself.
- * The glmutex lock is what generally prevents processes from working
- * on the same glock at once, but the special case of adding a second
- * holder for yourself ("recursive" locking) doesn't involve locking
- * glmutex, making the spin lock necessary.
- *
- * Returns: 1 if the queue is empty
- */
-
-static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
-{
-	int empty;
-	spin_lock(&gl->gl_spin);
-	empty = list_empty(head);
-	spin_unlock(&gl->gl_spin);
-	return empty;
-}
-
-/**
  * search_bucket() - Find struct gfs2_glock by lock number
  * @bucket: the bucket to search
  * @name: The lock name
@@ -814,7 +790,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 	int op_done = 1;
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
 
 	state_change(gl, ret & LM_OUT_ST_MASK);
@@ -925,7 +901,7 @@ void gfs2_glock_xmote_th(struct gfs2_holder *gh)
 	glops->go_xmote_th(gl);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
 	gfs2_assert_warn(sdp, state != gl->gl_state);
 
@@ -960,7 +936,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
 	struct gfs2_holder *gh = gl->gl_req_gh;
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !ret);
 
 	state_change(gl, LM_ST_UNLOCKED);
@@ -1007,7 +983,7 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
 	glops->go_drop_th(gl);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
 
 	gfs2_glock_hold(gl);
@@ -1697,7 +1673,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
 	atomic_inc(&sdp->sd_reclaimed);
 
 	if (gfs2_glmutex_trylock(gl)) {
-		if (queue_empty(gl, &gl->gl_holders) &&
+		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED);
 		gfs2_glmutex_unlock(gl);
@@ -1761,7 +1737,7 @@ static void scan_glock(struct gfs2_glock *gl)
 		return;
 
 	if (gfs2_glmutex_trylock(gl)) {
-		if (queue_empty(gl, &gl->gl_holders) &&
+		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
 			goto out_schedule;
 		gfs2_glmutex_unlock(gl);
@@ -1810,7 +1786,7 @@ static void clear_glock(struct gfs2_glock *gl)
 	}
 
 	if (gfs2_glmutex_trylock(gl)) {
-		if (queue_empty(gl, &gl->gl_holders) &&
+		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED)
 			handle_callback(gl, LM_ST_UNLOCKED);
 		gfs2_glmutex_unlock(gl);
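
Every queue_empty() call site touched by this patch either asserts GLF_LOCK or sits inside gfs2_glmutex_trylock(), so the replacement list_empty() read runs while the glock is already held and the gl_spin round trip the helper added is not needed at those sites. The sketch below is a userspace model of that before/after pattern, not GFS2 code: the struct glock_model type and helper names are invented here for illustration, with pthread locks standing in for gl_spin and the glmutex.

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next, *prev; };

struct glock_model {
	pthread_mutex_t glmutex;	/* stands in for GLF_LOCK / gfs2_glmutex_*() */
	pthread_spinlock_t spin;	/* stands in for gl->gl_spin */
	struct node holders;		/* circular list head, like gl->gl_holders */
};

static int model_list_empty(const struct node *head)
{
	return head->next == head;
}

/* Old pattern: take the spin lock because the caller may not hold glmutex. */
static int queue_empty_locked(struct glock_model *gl)
{
	int empty;
	pthread_spin_lock(&gl->spin);
	empty = model_list_empty(&gl->holders);
	pthread_spin_unlock(&gl->spin);
	return empty;
}

/* New pattern: the caller already holds glmutex, so read the list directly. */
static int queue_empty_unlocked(struct glock_model *gl)
{
	return model_list_empty(&gl->holders);
}

int main(void)
{
	struct glock_model gl;

	pthread_mutex_init(&gl.glmutex, NULL);
	pthread_spin_init(&gl.spin, PTHREAD_PROCESS_PRIVATE);
	gl.holders.next = gl.holders.prev = &gl.holders;	/* empty circular list */

	printf("old helper reports empty: %d\n", queue_empty_locked(&gl));

	pthread_mutex_lock(&gl.glmutex);	/* the state the GLF_LOCK asserts imply */
	printf("direct check reports empty: %d\n", queue_empty_unlocked(&gl));
	pthread_mutex_unlock(&gl.glmutex);

	return 0;
}

Build the sketch with something like cc sketch.c -lpthread; both checks print 1 for the empty list, the difference being only which lock protects the read.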