author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 13:25:24 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 13:25:24 -0500
commit    a1703154200c390ab03c10224c586e815d3e31e8 (patch)
tree      df90865eed3cfdf7af8664b5453a90e09d17480a /net/ceph
parent    67b5ad9a63caa2ce56ddd2b22b802dae00d72c13 (diff)
parent    766fc43973b16f9becb6b7402b3e052dbb84adee (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client:
  rbd: fix cleanup when trying to mount inexistent image
  net/ceph: make ceph_msgr_wq non-reentrant
  ceph: fsc->*_wq's aren't used in memory reclaim path
  ceph: Always free allocated memory in osdmap_decode()
  ceph: Makefile: Remove unnessary code
  ceph: associate requests with opening sessions
  ceph: drop redundant r_mds field
  ceph: implement DIRLAYOUTHASH feature to get dir layout from MDS
  ceph: add dir_layout to inode
Diffstat (limited to 'net/ceph')
-rw-r--r--  net/ceph/ceph_hash.c   3
-rw-r--r--  net/ceph/messenger.c  46
-rw-r--r--  net/ceph/osdmap.c      4
3 files changed, 8 insertions, 45 deletions
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 815ef8826796..0a1b53bce76d 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,6 @@
 
 #include <linux/ceph/types.h>
+#include <linux/module.h>
 
 /*
  * Robert Jenkin's hash function.
@@ -104,6 +105,7 @@ unsigned ceph_str_hash(int type, const char *s, unsigned len)
                return -1;
        }
 }
+EXPORT_SYMBOL(ceph_str_hash);
 
 const char *ceph_str_hash_name(int type)
 {
@@ -116,3 +118,4 @@ const char *ceph_str_hash_name(int type)
                return "unknown";
        }
 }
+EXPORT_SYMBOL(ceph_str_hash_name);
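A note on the two EXPORT_SYMBOL() additions: they make the hash helpers in libceph visible to separately built modules (the merge above mentions the DIRLAYOUTHASH work in the ceph filesystem, which is the kind of consumer that needs them). A minimal, hypothetical consumer module, written only to illustrate why the exports matter and not part of this commit:

#include <linux/module.h>
#include <linux/ceph/ceph_hash.h>

static int __init hash_demo_init(void)
{
        const char *s = "demo";
        unsigned h;

        /* These calls only resolve at module load time because net/ceph
         * now exports ceph_str_hash() and ceph_str_hash_name(). */
        h = ceph_str_hash(CEPH_STR_HASH_RJENKINS, s, 4);
        pr_info("%s hash of \"%s\" = %u\n",
                ceph_str_hash_name(CEPH_STR_HASH_RJENKINS), s, h);
        return 0;
}

static void __exit hash_demo_exit(void)
{
}

module_init(hash_demo_init);
module_exit(hash_demo_exit);
MODULE_LICENSE("GPL");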
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b6ff4a1519ab..dff633d62e5b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -96,7 +96,7 @@ struct workqueue_struct *ceph_msgr_wq;
 
 int ceph_msgr_init(void)
 {
-       ceph_msgr_wq = create_workqueue("ceph-msgr");
+       ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
        if (!ceph_msgr_wq) {
                pr_err("msgr_init failed to create workqueue\n");
                return -ENOMEM;
@@ -1920,20 +1920,6 @@ bad_tag:
 /*
  * Atomically queue work on a connection.  Bump @con reference to
  * avoid races with connection teardown.
- *
- * There is some trickery going on with QUEUED and BUSY because we
- * only want a _single_ thread operating on each connection at any
- * point in time, but we want to use all available CPUs.
- *
- * The worker thread only proceeds if it can atomically set BUSY.  It
- * clears QUEUED and does it's thing.  When it thinks it's done, it
- * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
- * (tries again to set BUSY).
- *
- * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
- * try to queue work.  If that fails (work is already queued, or BUSY)
- * we give up (work also already being done or is queued) but leave QUEUED
- * set so that the worker thread will loop if necessary.
  */
 static void queue_con(struct ceph_connection *con)
 {
@@ -1948,11 +1934,7 @@ static void queue_con(struct ceph_connection *con)
                return;
        }
 
-       set_bit(QUEUED, &con->state);
-       if (test_bit(BUSY, &con->state)) {
-               dout("queue_con %p - already BUSY\n", con);
-               con->ops->put(con);
-       } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
+       if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
                dout("queue_con %p - already queued\n", con);
                con->ops->put(con);
        } else {
@@ -1967,15 +1949,6 @@ static void con_work(struct work_struct *work)
 {
        struct ceph_connection *con = container_of(work, struct ceph_connection,
                                                   work.work);
-       int backoff = 0;
-
-more:
-       if (test_and_set_bit(BUSY, &con->state) != 0) {
-               dout("con_work %p BUSY already set\n", con);
-               goto out;
-       }
-       dout("con_work %p start, clearing QUEUED\n", con);
-       clear_bit(QUEUED, &con->state);
 
        mutex_lock(&con->mutex);
 
@@ -1994,28 +1967,13 @@ more:
            try_read(con) < 0 ||
            try_write(con) < 0) {
                mutex_unlock(&con->mutex);
-               backoff = 1;
                ceph_fault(con);     /* error/fault path */
                goto done_unlocked;
        }
 
 done:
        mutex_unlock(&con->mutex);
-
 done_unlocked:
-       clear_bit(BUSY, &con->state);
-       dout("con->state=%lu\n", con->state);
-       if (test_bit(QUEUED, &con->state)) {
-               if (!backoff || test_bit(OPENING, &con->state)) {
-                       dout("con_work %p QUEUED reset, looping\n", con);
-                       goto more;
-               }
-               dout("con_work %p QUEUED reset, but just faulted\n", con);
-               clear_bit(QUEUED, &con->state);
-       }
-       dout("con_work %p done\n", con);
-
-out:
        con->ops->put(con);
 }
 
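The messenger change leans on the workqueue core for exclusion instead of the hand-rolled QUEUED/BUSY bits the deleted comment described: with WQ_NON_REENTRANT, the same work item never runs on two CPUs at once, and queue_delayed_work() returning false covers the "already queued" case. A stripped-down sketch of that pattern with hypothetical names, not the ceph code itself:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
        /* WQ_NON_REENTRANT guarantees only one instance of this item is
         * running at any time, so no BUSY/QUEUED bit juggling is needed. */
}

static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
        demo_wq = alloc_workqueue("demo-wq", WQ_NON_REENTRANT, 0);
        if (!demo_wq)
                return -ENOMEM;

        /* Returns 0 if the item was already pending, mirroring the
         * "already queued" branch in queue_con() above. */
        if (!queue_delayed_work(demo_wq, &demo_work, 0))
                pr_info("demo work already queued\n");
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_delayed_work_sync(&demo_work);
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");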
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d73f3f6efa36..71603ac3dff5 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -605,8 +605,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
                        goto bad;
                }
                err = __decode_pool(p, end, pi);
-               if (err < 0)
+               if (err < 0) {
+                       kfree(pi);
                        goto bad;
+               }
                __insert_pg_pool(&map->pg_pools, pi);
        }
 
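The osdmap_decode() fix is the usual allocate/decode/free-on-error pattern: the pool allocated in this loop iteration is only owned by the map once __insert_pg_pool() has run, so a decode failure before that point has to free it locally or it leaks. A generic sketch of the same pattern, with hypothetical names rather than the actual ceph code:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_pool {
        struct list_head node;
        u32 id;
};

/* Toy decoder: consumes one u32 per pool from the [*p, end) buffer. */
static int demo_decode_pool(void **p, void *end, struct demo_pool *pi)
{
        if (*p + sizeof(u32) > end)
                return -ERANGE;         /* input truncated */
        memcpy(&pi->id, *p, sizeof(pi->id));
        *p += sizeof(u32);
        return 0;
}

static int demo_decode_pools(void **p, void *end, struct list_head *pools, u32 n)
{
        u32 i;
        int err;

        for (i = 0; i < n; i++) {
                struct demo_pool *pi = kzalloc(sizeof(*pi), GFP_NOFS);

                if (!pi)
                        return -ENOMEM;
                err = demo_decode_pool(p, end, pi);
                if (err < 0) {
                        kfree(pi);      /* nothing owns pi yet: free it here */
                        return err;
                }
                list_add_tail(&pi->node, pools);        /* the list owns pi now */
        }
        return 0;
}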