path: root/fs/gfs2
author    Robert Peterson <rpeterso@redhat.com>    2007-03-23 18:05:15 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2007-05-01 04:10:55 -0400
commit    04b933f27bc8e7f3f6423020cec58a4eab3bb7a7 (patch)
tree      992d9dd401b81ccb0b1f166fabd3ca315806361e /fs/gfs2
parent    172e045a7fcc3ee647fa70dbd585a3c247b49cb2 (diff)
[GFS2] Red Hat bz 228540: owner references
While testing the previously posted and accepted patch for https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=228540 I uncovered some gfs2 badness. It turns out that the current gfs2 code saves off a process pointer when a glock is taken, in both the glock and the glock holder structures. Those structures can persist in memory long after the process has ended, leaving pointers to poisoned memory. This problem isn't caused by the 228540 fix; the new capability introduced by that fix merely uncovered it.

This patch avoids saving process pointers and instead saves off the process pid. Rather than referencing the stale pointers, the code now does process lookups by pid. There is special code that makes the output nicer when printing holder information for processes that have ended.

This patch also adds a stub for the new "sprint_symbol" function that exists in Andrew Morton's -mm patch set, but won't go into the base kernel until 2.6.22, since it adds functionality but doesn't fix a bug.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
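For context, here is a minimal sketch (not part of the patch) of the pattern the change adopts: record current->pid when the lock is acquired, and only resolve that pid back to a task_struct at dump time, printing "(ended)" if the process no longer exists. find_task_by_pid() is the lookup the patch itself uses; the rcu_read_lock()/rcu_read_unlock() pair and the example_* names are illustrative assumptions, not code from the patch.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

/* Illustrative holder: keep the owner's pid, never a task_struct pointer. */
struct example_holder {
	pid_t owner_pid;	/* 0 means "no owner" */
};

static void example_holder_init(struct example_holder *eh)
{
	/* A pid is safe to keep around even after the task exits. */
	eh->owner_pid = current->pid;
}

static void example_holder_dump(const struct example_holder *eh)
{
	struct task_struct *owner;

	if (!eh->owner_pid) {
		printk(KERN_INFO "owner = -1\n");
		return;
	}
	/* Assumption: guard the lookup with RCU; the patch calls
	   find_task_by_pid() directly from its dump routines. */
	rcu_read_lock();
	owner = find_task_by_pid(eh->owner_pid);
	if (owner)
		printk(KERN_INFO "owner = %d (%s)\n", eh->owner_pid, owner->comm);
	else
		printk(KERN_INFO "owner = %d (ended)\n", eh->owner_pid);
	rcu_read_unlock();
}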
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c    75
-rw-r--r--  fs/gfs2/glock.h     2
-rw-r--r--  fs/gfs2/incore.h    4
3 files changed, 59 insertions, 22 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b8aa816bb6eb..d2e3094c40f8 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -25,6 +25,8 @@
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -54,6 +56,7 @@ struct glock_iter {
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
+static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
@@ -64,6 +67,7 @@ static struct dentry *gfs2_root;
 #define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)
 
 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+static struct dentry *gfs2_root;
 
 /*
  * Despite what you might think, the numbers below are not arbitrary :-)
@@ -312,7 +316,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	atomic_set(&gl->gl_ref, 1);
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_hash = hash;
-	gl->gl_owner = NULL;
+	gl->gl_owner_pid = 0;
 	gl->gl_ip = 0;
 	gl->gl_ops = glops;
 	gl->gl_req_gh = NULL;
@@ -376,7 +380,7 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 	INIT_LIST_HEAD(&gh->gh_list);
 	gh->gh_gl = gl;
 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
-	gh->gh_owner = current;
+	gh->gh_owner_pid = current->pid;
 	gh->gh_state = state;
 	gh->gh_flags = flags;
 	gh->gh_error = 0;
@@ -601,7 +605,7 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
 	} else {
-		gl->gl_owner = current;
+		gl->gl_owner_pid = current->pid;
 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
 		clear_bit(HIF_WAIT, &gh.gh_iflags);
 		smp_mb();
@@ -628,7 +632,7 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 		acquired = 0;
 	} else {
-		gl->gl_owner = current;
+		gl->gl_owner_pid = current->pid;
 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
 	}
 	spin_unlock(&gl->gl_spin);
@@ -646,7 +650,7 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
 {
 	spin_lock(&gl->gl_spin);
 	clear_bit(GLF_LOCK, &gl->gl_flags);
-	gl->gl_owner = NULL;
+	gl->gl_owner_pid = 0;
 	gl->gl_ip = 0;
 	run_queue(gl);
 	BUG_ON(!spin_is_locked(&gl->gl_spin));
@@ -999,12 +1003,12 @@ static int glock_wait_internal(struct gfs2_holder *gh)
 }
 
 static inline struct gfs2_holder *
-find_holder_by_owner(struct list_head *head, struct task_struct *owner)
+find_holder_by_owner(struct list_head *head, pid_t pid)
 {
 	struct gfs2_holder *gh;
 
 	list_for_each_entry(gh, head, gh_list) {
-		if (gh->gh_owner == owner)
+		if (gh->gh_owner_pid == pid)
 			return gh;
 	}
 
@@ -1036,24 +1040,24 @@ static void add_to_queue(struct gfs2_holder *gh)
 	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_holder *existing;
 
-	BUG_ON(!gh->gh_owner);
+	BUG_ON(!gh->gh_owner_pid);
 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 		BUG();
 
-	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
+	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
 	if (existing) {
 		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
-		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
+		printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
 		printk(KERN_INFO "lock type : %d lock state : %d\n",
 		       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
 		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
-		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
+		printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
 		printk(KERN_INFO "lock type : %d lock state : %d\n",
 		       gl->gl_name.ln_type, gl->gl_state);
 		BUG();
 	}
 
-	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
+	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
 	if (existing) {
 		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
 		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
@@ -1756,6 +1760,22 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
  * Diagnostic routines to help debug distributed deadlock
  */
 
+static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
+			      unsigned long address)
+{
+/* when sprint_symbol becomes available in the new kernel, replace this */
+/* function with:
+	char buffer[KSYM_SYMBOL_LEN];
+
+	sprint_symbol(buffer, address);
+	print_dbg(gi, fmt, buffer);
+*/
+	if (gi)
+		print_dbg(gi, fmt, address);
+	else
+		print_symbol(fmt, address);
+}
+
 /**
  * dump_holder - print information about a glock holder
  * @str: a string naming the type of holder
@@ -1768,10 +1788,18 @@ static int dump_holder(struct glock_iter *gi, char *str,
 		       struct gfs2_holder *gh)
 {
 	unsigned int x;
+	struct task_struct *gh_owner;
 
 	print_dbg(gi, " %s\n", str);
-	print_dbg(gi, " owner = %ld\n",
-		  (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
+	if (gh->gh_owner_pid) {
+		print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid);
+		gh_owner = find_task_by_pid(gh->gh_owner_pid);
+		if (gh_owner)
+			print_dbg(gi, "(%s)\n", gh_owner->comm);
+		else
+			print_dbg(gi, "(ended)\n");
+	} else
+		print_dbg(gi, " owner = -1\n");
 	print_dbg(gi, " gh_state = %u\n", gh->gh_state);
 	print_dbg(gi, " gh_flags =");
 	for (x = 0; x < 32; x++)
@@ -1784,10 +1812,7 @@ static int dump_holder(struct glock_iter *gi, char *str,
 		if (test_bit(x, &gh->gh_iflags))
 			print_dbg(gi, " %u", x);
 	print_dbg(gi, " \n");
-	if (gi)
-		print_dbg(gi, " initialized at: 0x%x\n", gh->gh_ip);
-	else
-		print_symbol(KERN_INFO " initialized at: %s\n", gh->gh_ip);
+	gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);
 
 	return 0;
 }
@@ -1828,6 +1853,7 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
 	struct gfs2_holder *gh;
 	unsigned int x;
 	int error = -ENOBUFS;
+	struct task_struct *gl_owner;
 
 	spin_lock(&gl->gl_spin);
 
@@ -1838,10 +1864,21 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
 		if (test_bit(x, &gl->gl_flags))
 			print_dbg(gi, " %u", x);
 	}
+	if (!test_bit(GLF_LOCK, &gl->gl_flags))
+		print_dbg(gi, " (unlocked)");
 	print_dbg(gi, " \n");
 	print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
 	print_dbg(gi, " gl_state = %u\n", gl->gl_state);
-	print_dbg(gi, " gl_owner = %s\n", gl->gl_owner->comm);
+	if (gl->gl_owner_pid) {
+		gl_owner = find_task_by_pid(gl->gl_owner_pid);
+		if (gl_owner)
+			print_dbg(gi, " gl_owner = pid %d (%s)\n",
+				  gl->gl_owner_pid, gl_owner->comm);
+		else
+			print_dbg(gi, " gl_owner = %d (ended)\n",
+				  gl->gl_owner_pid);
+	} else
+		print_dbg(gi, " gl_owner = -1\n");
 	print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
 	print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
 	print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 5e662eadc6f2..11477ca3a3c0 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -38,7 +38,7 @@ static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 	/* Look in glock's list of holders for one with current task as owner */
 	spin_lock(&gl->gl_spin);
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
-		if (gh->gh_owner == current) {
+		if (gh->gh_owner_pid == current->pid) {
 			locked = 1;
 			break;
 		}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 9c125823d760..fdf04705906f 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -127,7 +127,7 @@ struct gfs2_holder {
 	struct list_head gh_list;
 
 	struct gfs2_glock *gh_gl;
-	struct task_struct *gh_owner;
+	pid_t gh_owner_pid;
 	unsigned int gh_state;
 	unsigned gh_flags;
 
@@ -155,7 +155,7 @@ struct gfs2_glock {
 	unsigned int gl_hash;
 	unsigned int gl_demote_state; /* state requested by remote node */
 	unsigned long gl_demote_time; /* time of first demote request */
-	struct task_struct *gl_owner;
+	pid_t gl_owner_pid;
 	unsigned long gl_ip;
 	struct list_head gl_holders;
 	struct list_head gl_waiters1; /* HIF_MUTEX */