author    Steven Whitehouse <swhiteho@redhat.com>  2012-01-20 05:38:36 -0500
committer Steven Whitehouse <swhiteho@redhat.com>  2012-02-28 12:09:42 -0500
commit    a245769f254bbbea868e2cf8dc42daa061cd276f
tree      1280ab339924584dba6aaf6e0c9e5a6f5ec0580b
parent    891003abb0db6bfffd61b76ad0ed39bb7c3db8e1
GFS2: glock statistics gathering
The stats are divided into two sets: those relating to the super block and those relating to an individual glock. The super block stats are done on a per cpu basis in order to try and reduce the overhead of gathering them. They are also further divided by glock type.

In the case of both the super block and glock statistics, the same information is gathered in each case. The super block statistics are used to provide default values for most of the glock statistics, so that newly created glocks should have, as far as possible, a sensible starting point.

The statistics are divided into three pairs of mean and variance, plus two counters. The mean/variance pairs are smoothed exponential estimates and the algorithm used is one which will be very familiar to those used to calculating round trip times in network code. The three pairs of mean/variance measure the following things:

 1. DLM lock time (non-blocking requests)
 2. DLM lock time (blocking requests)
 3. Inter-request time (again to the DLM)

A non-blocking request is one which will complete right away, whatever the state of the DLM lock in question. That currently means any requests when (a) the current state of the lock is exclusive, (b) the requested state is either null or unlocked, or (c) the "try lock" flag is set. A blocking request covers all the other lock requests.

There are two counters. The first is there primarily to show how many lock requests have been made, and thus how much data has gone into the mean/variance calculations. The other counter is counting queueing of holders at the top layer of the glock code. Hopefully that number will be a lot larger than the number of dlm lock requests issued.

So why gather these statistics? There are several reasons we'd like to get a better idea of these timings:

 1. To be able to better set the glock "min hold time"
 2. To spot performance issues more easily
 3. To improve the algorithm for selecting resource groups for allocation (to base it on lock wait time, rather than blindly using a "try lock")

Due to the smoothing action of the updates, a step change in some input quantity being sampled will only fully be taken into account after 8 samples (or 4 for the variance) and this needs to be carefully considered when interpreting the results.

Knowing both the time it takes a lock request to complete and the average time between lock requests for a glock means we can compute the total percentage of the time for which the node is able to use a glock vs. time that the rest of the cluster has its share. That will be very useful when setting the lock min hold time.

The other point to remember is that all times are in nanoseconds. Great care has been taken to ensure that we measure exactly the quantities that we want, as accurately as possible. There are always inaccuracies in any measuring system, but I hope this is as accurate as we can reasonably make it.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
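As an illustration of the update rule described above, here is a minimal sketch in plain C. The standalone function and its name are assumptions for the example only; in the patch the same arithmetic is applied to the stats[] arrays (GFS2_LKS_SRTT and friends) by the DLM glue code, which is not shown in this file-limited diff. The gains follow the text: 1/8 for the mean, 1/4 for the variance.

	#include <stdint.h>

	/*
	 * Illustrative sketch only: one exponentially smoothed mean/variance
	 * pair of the kind described above. "Variance" here is a smoothed
	 * mean absolute deviation, as in classic TCP round-trip-time
	 * estimation; all samples are in nanoseconds.
	 */
	static void lkstat_sample(int64_t *mean, int64_t *var, int64_t sample_ns)
	{
		int64_t err = sample_ns - *mean;

		*mean += err >> 3;		/* mean += (sample - mean) / 8 */
		if (err < 0)
			err = -err;
		*var += (err - *var) >> 2;	/* var += (|err| - var) / 4 */
	}

With gains of 1/8 and 1/4, a step change in the sampled quantity is absorbed only gradually (the remaining error shrinks by a factor of 7/8, respectively 3/4, per sample), which is the smoothing behaviour the message above says must be considered when interpreting the results.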
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c	210
 1 file changed, 202 insertions, 8 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 351a3e797789..dab2526071cc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/rculist_bl.h>
 #include <linux/bit_spinlock.h>
+#include <linux/percpu.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -543,6 +544,11 @@ __acquires(&gl->gl_spin)
 		do_error(gl, 0); /* Fail queued try locks */
 	}
 	gl->gl_req = target;
+	set_bit(GLF_BLOCKING, &gl->gl_flags);
+	if ((gl->gl_req == LM_ST_UNLOCKED) ||
+	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
+	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
+		clear_bit(GLF_BLOCKING, &gl->gl_flags);
 	spin_unlock(&gl->gl_spin);
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
@@ -744,6 +750,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		return -ENOMEM;
 
 	atomic_inc(&sdp->sd_glock_disposal);
+	gl->gl_sbd = sdp;
 	gl->gl_flags = 0;
 	gl->gl_name = name;
 	atomic_set(&gl->gl_ref, 1);
@@ -752,12 +759,17 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 	gl->gl_hash = hash;
 	gl->gl_ops = glops;
-	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
+	gl->gl_dstamp = ktime_set(0, 0);
+	preempt_disable();
+	/* We use the global stats to estimate the initial per-glock stats */
+	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
+	preempt_enable();
+	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
+	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
-	gl->gl_sbd = sdp;
 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
 	INIT_WORK(&gl->gl_delete, delete_work_func);
@@ -999,6 +1011,8 @@ fail:
 	}
 	set_bit(GLF_QUEUED, &gl->gl_flags);
 	trace_gfs2_glock_queue(gh, 1);
+	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
+	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
 	if (likely(insert_pt == NULL)) {
 		list_add_tail(&gh->gh_list, &gl->gl_holders);
 		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
@@ -1658,6 +1672,8 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 		*p++ = 'L';
 	if (gl->gl_object)
 		*p++ = 'o';
+	if (test_bit(GLF_BLOCKING, gflags))
+		*p++ = 'b';
 	*p = 0;
 	return buf;
 }
@@ -1714,8 +1730,78 @@ out:
 	return error;
 }
 
+static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
+{
+	struct gfs2_glock *gl = iter_ptr;
+
+	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
+		   gl->gl_name.ln_type,
+		   (unsigned long long)gl->gl_name.ln_number,
+		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
+		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
+		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
+		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
+		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
+		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
+		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
+		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
+	return 0;
+}
+
+static const char *gfs2_gltype[] = {
+	"type",
+	"reserved",
+	"nondisk",
+	"inode",
+	"rgrp",
+	"meta",
+	"iopen",
+	"flock",
+	"plock",
+	"quota",
+	"journal",
+};
+
+static const char *gfs2_stype[] = {
+	[GFS2_LKS_SRTT] = "srtt",
+	[GFS2_LKS_SRTTVAR] = "srttvar",
+	[GFS2_LKS_SRTTB] = "srttb",
+	[GFS2_LKS_SRTTVARB] = "srttvarb",
+	[GFS2_LKS_SIRT] = "sirt",
+	[GFS2_LKS_SIRTVAR] = "sirtvar",
+	[GFS2_LKS_DCOUNT] = "dlm",
+	[GFS2_LKS_QCOUNT] = "queue",
+};
+
+#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
+
+static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
+{
+	struct gfs2_glock_iter *gi = seq->private;
+	struct gfs2_sbd *sdp = gi->sdp;
+	unsigned index = gi->hash >> 3;
+	unsigned subindex = gi->hash & 0x07;
+	s64 value;
+	int i;
+
+	if (index == 0 && subindex != 0)
+		return 0;
 
+	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
+		   (index == 0) ? "cpu": gfs2_stype[subindex]);
 
+	for_each_possible_cpu(i) {
+		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
+		if (index == 0) {
+			value = i;
+		} else {
+			value = lkstats->lkstats[index - 1].stats[subindex];
+		}
+		seq_printf(seq, " %15lld", (long long)value);
+	}
+	seq_putc(seq, '\n');
+	return 0;
+}
 
 int __init gfs2_glock_init(void)
 {
@@ -1828,6 +1914,35 @@ static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
 	return dump_glock(seq, iter_ptr);
 }
 
+static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct gfs2_glock_iter *gi = seq->private;
+
+	gi->hash = *pos;
+	if (*pos >= GFS2_NR_SBSTATS)
+		return NULL;
+	preempt_disable();
+	return SEQ_START_TOKEN;
+}
+
+static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
+				   loff_t *pos)
+{
+	struct gfs2_glock_iter *gi = seq->private;
+	(*pos)++;
+	gi->hash++;
+	if (gi->hash >= GFS2_NR_SBSTATS) {
+		preempt_enable();
+		return NULL;
+	}
+	return SEQ_START_TOKEN;
+}
+
+static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
+{
+	preempt_enable();
+}
+
 static const struct seq_operations gfs2_glock_seq_ops = {
 	.start = gfs2_glock_seq_start,
 	.next = gfs2_glock_seq_next,
@@ -1835,7 +1950,21 @@ static const struct seq_operations gfs2_glock_seq_ops = {
 	.show = gfs2_glock_seq_show,
 };
 
-static int gfs2_debugfs_open(struct inode *inode, struct file *file)
+static const struct seq_operations gfs2_glstats_seq_ops = {
+	.start = gfs2_glock_seq_start,
+	.next = gfs2_glock_seq_next,
+	.stop = gfs2_glock_seq_stop,
+	.show = gfs2_glstats_seq_show,
+};
+
+static const struct seq_operations gfs2_sbstats_seq_ops = {
+	.start = gfs2_sbstats_seq_start,
+	.next = gfs2_sbstats_seq_next,
+	.stop = gfs2_sbstats_seq_stop,
+	.show = gfs2_sbstats_seq_show,
+};
+
+static int gfs2_glocks_open(struct inode *inode, struct file *file)
 {
 	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
 				   sizeof(struct gfs2_glock_iter));
@@ -1847,9 +1976,49 @@ static int gfs2_debugfs_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-static const struct file_operations gfs2_debug_fops = {
+static int gfs2_glstats_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
+				   sizeof(struct gfs2_glock_iter));
+	if (ret == 0) {
+		struct seq_file *seq = file->private_data;
+		struct gfs2_glock_iter *gi = seq->private;
+		gi->sdp = inode->i_private;
+	}
+	return ret;
+}
+
+static int gfs2_sbstats_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
+				   sizeof(struct gfs2_glock_iter));
+	if (ret == 0) {
+		struct seq_file *seq = file->private_data;
+		struct gfs2_glock_iter *gi = seq->private;
+		gi->sdp = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations gfs2_glocks_fops = {
+	.owner = THIS_MODULE,
+	.open = gfs2_glocks_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_private,
+};
+
+static const struct file_operations gfs2_glstats_fops = {
 	.owner = THIS_MODULE,
-	.open = gfs2_debugfs_open,
+	.open = gfs2_glstats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_private,
+};
+
+static const struct file_operations gfs2_sbstats_fops = {
+	.owner = THIS_MODULE,
+	.open = gfs2_sbstats_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = seq_release_private,
@@ -1863,20 +2032,45 @@ int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
 	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
 							 S_IFREG | S_IRUGO,
 							 sdp->debugfs_dir, sdp,
-							 &gfs2_debug_fops);
+							 &gfs2_glocks_fops);
 	if (!sdp->debugfs_dentry_glocks)
-		return -ENOMEM;
+		goto fail;
+
+	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
+							  S_IFREG | S_IRUGO,
+							  sdp->debugfs_dir, sdp,
+							  &gfs2_glstats_fops);
+	if (!sdp->debugfs_dentry_glstats)
+		goto fail;
+
+	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
+							  S_IFREG | S_IRUGO,
+							  sdp->debugfs_dir, sdp,
+							  &gfs2_sbstats_fops);
+	if (!sdp->debugfs_dentry_sbstats)
+		goto fail;
 
 	return 0;
+fail:
+	gfs2_delete_debugfs_file(sdp);
+	return -ENOMEM;
 }
 
 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
 {
-	if (sdp && sdp->debugfs_dir) {
+	if (sdp->debugfs_dir) {
 		if (sdp->debugfs_dentry_glocks) {
 			debugfs_remove(sdp->debugfs_dentry_glocks);
 			sdp->debugfs_dentry_glocks = NULL;
 		}
+		if (sdp->debugfs_dentry_glstats) {
+			debugfs_remove(sdp->debugfs_dentry_glstats);
+			sdp->debugfs_dentry_glstats = NULL;
+		}
+		if (sdp->debugfs_dentry_sbstats) {
+			debugfs_remove(sdp->debugfs_dentry_sbstats);
+			sdp->debugfs_dentry_sbstats = NULL;
+		}
 		debugfs_remove(sdp->debugfs_dir);
 		sdp->debugfs_dir = NULL;
 	}
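
With the patch applied, the per-filesystem debugfs directory (normally under /sys/kernel/debug/gfs2/) carries three files: the existing "glocks" dump; "glstats", which prints one line per glock in the format shown in gfs2_glstats_seq_show() above (glock type and number, then the srtt/srttvar, srttb/srttvarb and sirt/sirtvar estimates in nanoseconds, followed by the dlm and queue counts); and "sbstats", a table produced by gfs2_sbstats_seq_show() with a header row of CPU numbers and then one row per (glock type, statistic) pair with one column per possible CPU.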