diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-05 20:26:24 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-09-05 20:26:24 -0400 |
commit | 17447717a3266965e257d3eae79d89539ce3ec0a (patch) | |
tree | e54d7cd4a78d264e2cdf9a077ac023b413edd336 /net/sunrpc | |
parent | 22365979ab15f8500254cb90037b0b4c17554739 (diff) | |
parent | a457974f1b9524a6e7d0a0be10df760e7802d32f (diff) |
Merge tag 'nfsd-4.3' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Nothing major, but:
- Add Jeff Layton as an nfsd co-maintainer: no change to existing
practice, just an acknowledgement of the status quo.
- Two patches ("nfsd: ensure that...") for a race overlooked by the
state locking rewrite, causing a crash noticed by multiple users.
- Lots of smaller bugfixes all over from Kinglong Mee.
- From Jeff, some cleanup of server rpc code in preparation for
possible shift of nfsd threads to workqueues"
* tag 'nfsd-4.3' of git://linux-nfs.org/~bfields/linux: (52 commits)
nfsd: deal with DELEGRETURN racing with CB_RECALL
nfsd: return CLID_INUSE for unexpected SETCLIENTID_CONFIRM case
nfsd: ensure that delegation stateid hash references are only put once
nfsd: ensure that the ol stateid hash reference is only put once
net: sunrpc: fix tracepoint Warning: unknown op '->'
nfsd: allow more than one laundry job to run at a time
nfsd: don't WARN/backtrace for invalid container deployment.
fs: fix fs/locks.c kernel-doc warning
nfsd: Add Jeff Layton as co-maintainer
NFSD: Return word2 bitmask if setting security label in OPEN/CREATE
NFSD: Set the attributes used to store the verifier for EXCLUSIVE4_1
nfsd: SUPPATTR_EXCLCREAT must be encoded before SECURITY_LABEL.
nfsd: Fix an FS_LAYOUT_TYPES/LAYOUT_TYPES encode bug
NFSD: Store parent's stat in a separate value
nfsd: Fix two typos in comments
lockd: NLM grace period shouldn't block NFSv4 opens
nfsd: include linux/nfs4.h in export.h
sunrpc: Switch to using hash list instead single list
sunrpc/nfsd: Remove redundant code by exports seq_operations functions
sunrpc: Store cache_detail in seq_file's private directly
...
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- | net/sunrpc/cache.c | 103 | ||||
-rw-r--r-- | net/sunrpc/svc.c | 113 | ||||
-rw-r--r-- | net/sunrpc/svc_xprt.c | 10 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_sendto.c | 83 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_transport.c | 37 | ||||
-rw-r--r-- | net/sunrpc/xprtrdma/xprt_rdma.h | 1 |
6 files changed, 197 insertions, 150 deletions
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 2928afffbb81..4a2340a54401 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -44,7 +44,7 @@ static void cache_revisit_request(struct cache_head *item); | |||
44 | static void cache_init(struct cache_head *h) | 44 | static void cache_init(struct cache_head *h) |
45 | { | 45 | { |
46 | time_t now = seconds_since_boot(); | 46 | time_t now = seconds_since_boot(); |
47 | h->next = NULL; | 47 | INIT_HLIST_NODE(&h->cache_list); |
48 | h->flags = 0; | 48 | h->flags = 0; |
49 | kref_init(&h->ref); | 49 | kref_init(&h->ref); |
50 | h->expiry_time = now + CACHE_NEW_EXPIRY; | 50 | h->expiry_time = now + CACHE_NEW_EXPIRY; |
@@ -54,15 +54,14 @@ static void cache_init(struct cache_head *h) | |||
54 | struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | 54 | struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, |
55 | struct cache_head *key, int hash) | 55 | struct cache_head *key, int hash) |
56 | { | 56 | { |
57 | struct cache_head **head, **hp; | 57 | struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL; |
58 | struct cache_head *new = NULL, *freeme = NULL; | 58 | struct hlist_head *head; |
59 | 59 | ||
60 | head = &detail->hash_table[hash]; | 60 | head = &detail->hash_table[hash]; |
61 | 61 | ||
62 | read_lock(&detail->hash_lock); | 62 | read_lock(&detail->hash_lock); |
63 | 63 | ||
64 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { | 64 | hlist_for_each_entry(tmp, head, cache_list) { |
65 | struct cache_head *tmp = *hp; | ||
66 | if (detail->match(tmp, key)) { | 65 | if (detail->match(tmp, key)) { |
67 | if (cache_is_expired(detail, tmp)) | 66 | if (cache_is_expired(detail, tmp)) |
68 | /* This entry is expired, we will discard it. */ | 67 | /* This entry is expired, we will discard it. */ |
@@ -88,12 +87,10 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
88 | write_lock(&detail->hash_lock); | 87 | write_lock(&detail->hash_lock); |
89 | 88 | ||
90 | /* check if entry appeared while we slept */ | 89 | /* check if entry appeared while we slept */ |
91 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { | 90 | hlist_for_each_entry(tmp, head, cache_list) { |
92 | struct cache_head *tmp = *hp; | ||
93 | if (detail->match(tmp, key)) { | 91 | if (detail->match(tmp, key)) { |
94 | if (cache_is_expired(detail, tmp)) { | 92 | if (cache_is_expired(detail, tmp)) { |
95 | *hp = tmp->next; | 93 | hlist_del_init(&tmp->cache_list); |
96 | tmp->next = NULL; | ||
97 | detail->entries --; | 94 | detail->entries --; |
98 | freeme = tmp; | 95 | freeme = tmp; |
99 | break; | 96 | break; |
@@ -104,8 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
104 | return tmp; | 101 | return tmp; |
105 | } | 102 | } |
106 | } | 103 | } |
107 | new->next = *head; | 104 | |
108 | *head = new; | 105 | hlist_add_head(&new->cache_list, head); |
109 | detail->entries++; | 106 | detail->entries++; |
110 | cache_get(new); | 107 | cache_get(new); |
111 | write_unlock(&detail->hash_lock); | 108 | write_unlock(&detail->hash_lock); |
@@ -143,7 +140,6 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
143 | * If 'old' is not VALID, we update it directly, | 140 | * If 'old' is not VALID, we update it directly, |
144 | * otherwise we need to replace it | 141 | * otherwise we need to replace it |
145 | */ | 142 | */ |
146 | struct cache_head **head; | ||
147 | struct cache_head *tmp; | 143 | struct cache_head *tmp; |
148 | 144 | ||
149 | if (!test_bit(CACHE_VALID, &old->flags)) { | 145 | if (!test_bit(CACHE_VALID, &old->flags)) { |
@@ -168,15 +164,13 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, | |||
168 | } | 164 | } |
169 | cache_init(tmp); | 165 | cache_init(tmp); |
170 | detail->init(tmp, old); | 166 | detail->init(tmp, old); |
171 | head = &detail->hash_table[hash]; | ||
172 | 167 | ||
173 | write_lock(&detail->hash_lock); | 168 | write_lock(&detail->hash_lock); |
174 | if (test_bit(CACHE_NEGATIVE, &new->flags)) | 169 | if (test_bit(CACHE_NEGATIVE, &new->flags)) |
175 | set_bit(CACHE_NEGATIVE, &tmp->flags); | 170 | set_bit(CACHE_NEGATIVE, &tmp->flags); |
176 | else | 171 | else |
177 | detail->update(tmp, new); | 172 | detail->update(tmp, new); |
178 | tmp->next = *head; | 173 | hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]); |
179 | *head = tmp; | ||
180 | detail->entries++; | 174 | detail->entries++; |
181 | cache_get(tmp); | 175 | cache_get(tmp); |
182 | cache_fresh_locked(tmp, new->expiry_time); | 176 | cache_fresh_locked(tmp, new->expiry_time); |
@@ -416,28 +410,29 @@ static int cache_clean(void) | |||
416 | /* find a non-empty bucket in the table */ | 410 | /* find a non-empty bucket in the table */ |
417 | while (current_detail && | 411 | while (current_detail && |
418 | current_index < current_detail->hash_size && | 412 | current_index < current_detail->hash_size && |
419 | current_detail->hash_table[current_index] == NULL) | 413 | hlist_empty(¤t_detail->hash_table[current_index])) |
420 | current_index++; | 414 | current_index++; |
421 | 415 | ||
422 | /* find a cleanable entry in the bucket and clean it, or set to next bucket */ | 416 | /* find a cleanable entry in the bucket and clean it, or set to next bucket */ |
423 | 417 | ||
424 | if (current_detail && current_index < current_detail->hash_size) { | 418 | if (current_detail && current_index < current_detail->hash_size) { |
425 | struct cache_head *ch, **cp; | 419 | struct cache_head *ch = NULL; |
426 | struct cache_detail *d; | 420 | struct cache_detail *d; |
421 | struct hlist_head *head; | ||
422 | struct hlist_node *tmp; | ||
427 | 423 | ||
428 | write_lock(¤t_detail->hash_lock); | 424 | write_lock(¤t_detail->hash_lock); |
429 | 425 | ||
430 | /* Ok, now to clean this strand */ | 426 | /* Ok, now to clean this strand */ |
431 | 427 | ||
432 | cp = & current_detail->hash_table[current_index]; | 428 | head = ¤t_detail->hash_table[current_index]; |
433 | for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) { | 429 | hlist_for_each_entry_safe(ch, tmp, head, cache_list) { |
434 | if (current_detail->nextcheck > ch->expiry_time) | 430 | if (current_detail->nextcheck > ch->expiry_time) |
435 | current_detail->nextcheck = ch->expiry_time+1; | 431 | current_detail->nextcheck = ch->expiry_time+1; |
436 | if (!cache_is_expired(current_detail, ch)) | 432 | if (!cache_is_expired(current_detail, ch)) |
437 | continue; | 433 | continue; |
438 | 434 | ||
439 | *cp = ch->next; | 435 | hlist_del_init(&ch->cache_list); |
440 | ch->next = NULL; | ||
441 | current_detail->entries--; | 436 | current_detail->entries--; |
442 | rv = 1; | 437 | rv = 1; |
443 | break; | 438 | break; |
@@ -1270,18 +1265,13 @@ EXPORT_SYMBOL_GPL(qword_get); | |||
1270 | * get a header, then pass each real item in the cache | 1265 | * get a header, then pass each real item in the cache |
1271 | */ | 1266 | */ |
1272 | 1267 | ||
1273 | struct handle { | 1268 | void *cache_seq_start(struct seq_file *m, loff_t *pos) |
1274 | struct cache_detail *cd; | ||
1275 | }; | ||
1276 | |||
1277 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
1278 | __acquires(cd->hash_lock) | 1269 | __acquires(cd->hash_lock) |
1279 | { | 1270 | { |
1280 | loff_t n = *pos; | 1271 | loff_t n = *pos; |
1281 | unsigned int hash, entry; | 1272 | unsigned int hash, entry; |
1282 | struct cache_head *ch; | 1273 | struct cache_head *ch; |
1283 | struct cache_detail *cd = ((struct handle*)m->private)->cd; | 1274 | struct cache_detail *cd = m->private; |
1284 | |||
1285 | 1275 | ||
1286 | read_lock(&cd->hash_lock); | 1276 | read_lock(&cd->hash_lock); |
1287 | if (!n--) | 1277 | if (!n--) |
@@ -1289,7 +1279,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) | |||
1289 | hash = n >> 32; | 1279 | hash = n >> 32; |
1290 | entry = n & ((1LL<<32) - 1); | 1280 | entry = n & ((1LL<<32) - 1); |
1291 | 1281 | ||
1292 | for (ch=cd->hash_table[hash]; ch; ch=ch->next) | 1282 | hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list) |
1293 | if (!entry--) | 1283 | if (!entry--) |
1294 | return ch; | 1284 | return ch; |
1295 | n &= ~((1LL<<32) - 1); | 1285 | n &= ~((1LL<<32) - 1); |
@@ -1297,51 +1287,57 @@ static void *c_start(struct seq_file *m, loff_t *pos) | |||
1297 | hash++; | 1287 | hash++; |
1298 | n += 1LL<<32; | 1288 | n += 1LL<<32; |
1299 | } while(hash < cd->hash_size && | 1289 | } while(hash < cd->hash_size && |
1300 | cd->hash_table[hash]==NULL); | 1290 | hlist_empty(&cd->hash_table[hash])); |
1301 | if (hash >= cd->hash_size) | 1291 | if (hash >= cd->hash_size) |
1302 | return NULL; | 1292 | return NULL; |
1303 | *pos = n+1; | 1293 | *pos = n+1; |
1304 | return cd->hash_table[hash]; | 1294 | return hlist_entry_safe(cd->hash_table[hash].first, |
1295 | struct cache_head, cache_list); | ||
1305 | } | 1296 | } |
1297 | EXPORT_SYMBOL_GPL(cache_seq_start); | ||
1306 | 1298 | ||
1307 | static void *c_next(struct seq_file *m, void *p, loff_t *pos) | 1299 | void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos) |
1308 | { | 1300 | { |
1309 | struct cache_head *ch = p; | 1301 | struct cache_head *ch = p; |
1310 | int hash = (*pos >> 32); | 1302 | int hash = (*pos >> 32); |
1311 | struct cache_detail *cd = ((struct handle*)m->private)->cd; | 1303 | struct cache_detail *cd = m->private; |
1312 | 1304 | ||
1313 | if (p == SEQ_START_TOKEN) | 1305 | if (p == SEQ_START_TOKEN) |
1314 | hash = 0; | 1306 | hash = 0; |
1315 | else if (ch->next == NULL) { | 1307 | else if (ch->cache_list.next == NULL) { |
1316 | hash++; | 1308 | hash++; |
1317 | *pos += 1LL<<32; | 1309 | *pos += 1LL<<32; |
1318 | } else { | 1310 | } else { |
1319 | ++*pos; | 1311 | ++*pos; |
1320 | return ch->next; | 1312 | return hlist_entry_safe(ch->cache_list.next, |
1313 | struct cache_head, cache_list); | ||
1321 | } | 1314 | } |
1322 | *pos &= ~((1LL<<32) - 1); | 1315 | *pos &= ~((1LL<<32) - 1); |
1323 | while (hash < cd->hash_size && | 1316 | while (hash < cd->hash_size && |
1324 | cd->hash_table[hash] == NULL) { | 1317 | hlist_empty(&cd->hash_table[hash])) { |
1325 | hash++; | 1318 | hash++; |
1326 | *pos += 1LL<<32; | 1319 | *pos += 1LL<<32; |
1327 | } | 1320 | } |
1328 | if (hash >= cd->hash_size) | 1321 | if (hash >= cd->hash_size) |
1329 | return NULL; | 1322 | return NULL; |
1330 | ++*pos; | 1323 | ++*pos; |
1331 | return cd->hash_table[hash]; | 1324 | return hlist_entry_safe(cd->hash_table[hash].first, |
1325 | struct cache_head, cache_list); | ||
1332 | } | 1326 | } |
1327 | EXPORT_SYMBOL_GPL(cache_seq_next); | ||
1333 | 1328 | ||
1334 | static void c_stop(struct seq_file *m, void *p) | 1329 | void cache_seq_stop(struct seq_file *m, void *p) |
1335 | __releases(cd->hash_lock) | 1330 | __releases(cd->hash_lock) |
1336 | { | 1331 | { |
1337 | struct cache_detail *cd = ((struct handle*)m->private)->cd; | 1332 | struct cache_detail *cd = m->private; |
1338 | read_unlock(&cd->hash_lock); | 1333 | read_unlock(&cd->hash_lock); |
1339 | } | 1334 | } |
1335 | EXPORT_SYMBOL_GPL(cache_seq_stop); | ||
1340 | 1336 | ||
1341 | static int c_show(struct seq_file *m, void *p) | 1337 | static int c_show(struct seq_file *m, void *p) |
1342 | { | 1338 | { |
1343 | struct cache_head *cp = p; | 1339 | struct cache_head *cp = p; |
1344 | struct cache_detail *cd = ((struct handle*)m->private)->cd; | 1340 | struct cache_detail *cd = m->private; |
1345 | 1341 | ||
1346 | if (p == SEQ_START_TOKEN) | 1342 | if (p == SEQ_START_TOKEN) |
1347 | return cd->cache_show(m, cd, NULL); | 1343 | return cd->cache_show(m, cd, NULL); |
@@ -1364,33 +1360,36 @@ static int c_show(struct seq_file *m, void *p) | |||
1364 | } | 1360 | } |
1365 | 1361 | ||
1366 | static const struct seq_operations cache_content_op = { | 1362 | static const struct seq_operations cache_content_op = { |
1367 | .start = c_start, | 1363 | .start = cache_seq_start, |
1368 | .next = c_next, | 1364 | .next = cache_seq_next, |
1369 | .stop = c_stop, | 1365 | .stop = cache_seq_stop, |
1370 | .show = c_show, | 1366 | .show = c_show, |
1371 | }; | 1367 | }; |
1372 | 1368 | ||
1373 | static int content_open(struct inode *inode, struct file *file, | 1369 | static int content_open(struct inode *inode, struct file *file, |
1374 | struct cache_detail *cd) | 1370 | struct cache_detail *cd) |
1375 | { | 1371 | { |
1376 | struct handle *han; | 1372 | struct seq_file *seq; |
1373 | int err; | ||
1377 | 1374 | ||
1378 | if (!cd || !try_module_get(cd->owner)) | 1375 | if (!cd || !try_module_get(cd->owner)) |
1379 | return -EACCES; | 1376 | return -EACCES; |
1380 | han = __seq_open_private(file, &cache_content_op, sizeof(*han)); | 1377 | |
1381 | if (han == NULL) { | 1378 | err = seq_open(file, &cache_content_op); |
1379 | if (err) { | ||
1382 | module_put(cd->owner); | 1380 | module_put(cd->owner); |
1383 | return -ENOMEM; | 1381 | return err; |
1384 | } | 1382 | } |
1385 | 1383 | ||
1386 | han->cd = cd; | 1384 | seq = file->private_data; |
1385 | seq->private = cd; | ||
1387 | return 0; | 1386 | return 0; |
1388 | } | 1387 | } |
1389 | 1388 | ||
1390 | static int content_release(struct inode *inode, struct file *file, | 1389 | static int content_release(struct inode *inode, struct file *file, |
1391 | struct cache_detail *cd) | 1390 | struct cache_detail *cd) |
1392 | { | 1391 | { |
1393 | int ret = seq_release_private(inode, file); | 1392 | int ret = seq_release(inode, file); |
1394 | module_put(cd->owner); | 1393 | module_put(cd->owner); |
1395 | return ret; | 1394 | return ret; |
1396 | } | 1395 | } |
@@ -1665,17 +1664,21 @@ EXPORT_SYMBOL_GPL(cache_unregister_net); | |||
1665 | struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net) | 1664 | struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net) |
1666 | { | 1665 | { |
1667 | struct cache_detail *cd; | 1666 | struct cache_detail *cd; |
1667 | int i; | ||
1668 | 1668 | ||
1669 | cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL); | 1669 | cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL); |
1670 | if (cd == NULL) | 1670 | if (cd == NULL) |
1671 | return ERR_PTR(-ENOMEM); | 1671 | return ERR_PTR(-ENOMEM); |
1672 | 1672 | ||
1673 | cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *), | 1673 | cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head), |
1674 | GFP_KERNEL); | 1674 | GFP_KERNEL); |
1675 | if (cd->hash_table == NULL) { | 1675 | if (cd->hash_table == NULL) { |
1676 | kfree(cd); | 1676 | kfree(cd); |
1677 | return ERR_PTR(-ENOMEM); | 1677 | return ERR_PTR(-ENOMEM); |
1678 | } | 1678 | } |
1679 | |||
1680 | for (i = 0; i < cd->hash_size; i++) | ||
1681 | INIT_HLIST_HEAD(&cd->hash_table[i]); | ||
1679 | cd->net = net; | 1682 | cd->net = net; |
1680 | return cd; | 1683 | return cd; |
1681 | } | 1684 | } |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 5a16d8d8c831..a8f579df14d8 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -34,36 +34,19 @@ | |||
34 | 34 | ||
35 | static void svc_unregister(const struct svc_serv *serv, struct net *net); | 35 | static void svc_unregister(const struct svc_serv *serv, struct net *net); |
36 | 36 | ||
37 | #define svc_serv_is_pooled(serv) ((serv)->sv_function) | 37 | #define svc_serv_is_pooled(serv) ((serv)->sv_ops->svo_function) |
38 | 38 | ||
39 | /* | ||
40 | * Mode for mapping cpus to pools. | ||
41 | */ | ||
42 | enum { | ||
43 | SVC_POOL_AUTO = -1, /* choose one of the others */ | ||
44 | SVC_POOL_GLOBAL, /* no mapping, just a single global pool | ||
45 | * (legacy & UP mode) */ | ||
46 | SVC_POOL_PERCPU, /* one pool per cpu */ | ||
47 | SVC_POOL_PERNODE /* one pool per numa node */ | ||
48 | }; | ||
49 | #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL | 39 | #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL |
50 | 40 | ||
51 | /* | 41 | /* |
52 | * Structure for mapping cpus to pools and vice versa. | 42 | * Structure for mapping cpus to pools and vice versa. |
53 | * Setup once during sunrpc initialisation. | 43 | * Setup once during sunrpc initialisation. |
54 | */ | 44 | */ |
55 | static struct svc_pool_map { | 45 | struct svc_pool_map svc_pool_map = { |
56 | int count; /* How many svc_servs use us */ | ||
57 | int mode; /* Note: int not enum to avoid | ||
58 | * warnings about "enumeration value | ||
59 | * not handled in switch" */ | ||
60 | unsigned int npools; | ||
61 | unsigned int *pool_to; /* maps pool id to cpu or node */ | ||
62 | unsigned int *to_pool; /* maps cpu or node to pool id */ | ||
63 | } svc_pool_map = { | ||
64 | .count = 0, | ||
65 | .mode = SVC_POOL_DEFAULT | 46 | .mode = SVC_POOL_DEFAULT |
66 | }; | 47 | }; |
48 | EXPORT_SYMBOL_GPL(svc_pool_map); | ||
49 | |||
67 | static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */ | 50 | static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */ |
68 | 51 | ||
69 | static int | 52 | static int |
@@ -236,7 +219,7 @@ svc_pool_map_init_pernode(struct svc_pool_map *m) | |||
236 | * vice versa). Initialise the map if we're the first user. | 219 | * vice versa). Initialise the map if we're the first user. |
237 | * Returns the number of pools. | 220 | * Returns the number of pools. |
238 | */ | 221 | */ |
239 | static unsigned int | 222 | unsigned int |
240 | svc_pool_map_get(void) | 223 | svc_pool_map_get(void) |
241 | { | 224 | { |
242 | struct svc_pool_map *m = &svc_pool_map; | 225 | struct svc_pool_map *m = &svc_pool_map; |
@@ -271,7 +254,7 @@ svc_pool_map_get(void) | |||
271 | mutex_unlock(&svc_pool_map_mutex); | 254 | mutex_unlock(&svc_pool_map_mutex); |
272 | return m->npools; | 255 | return m->npools; |
273 | } | 256 | } |
274 | 257 | EXPORT_SYMBOL_GPL(svc_pool_map_get); | |
275 | 258 | ||
276 | /* | 259 | /* |
277 | * Drop a reference to the global map of cpus to pools. | 260 | * Drop a reference to the global map of cpus to pools. |
@@ -280,7 +263,7 @@ svc_pool_map_get(void) | |||
280 | * mode using the pool_mode module option without | 263 | * mode using the pool_mode module option without |
281 | * rebooting or re-loading sunrpc.ko. | 264 | * rebooting or re-loading sunrpc.ko. |
282 | */ | 265 | */ |
283 | static void | 266 | void |
284 | svc_pool_map_put(void) | 267 | svc_pool_map_put(void) |
285 | { | 268 | { |
286 | struct svc_pool_map *m = &svc_pool_map; | 269 | struct svc_pool_map *m = &svc_pool_map; |
@@ -297,7 +280,7 @@ svc_pool_map_put(void) | |||
297 | 280 | ||
298 | mutex_unlock(&svc_pool_map_mutex); | 281 | mutex_unlock(&svc_pool_map_mutex); |
299 | } | 282 | } |
300 | 283 | EXPORT_SYMBOL_GPL(svc_pool_map_put); | |
301 | 284 | ||
302 | static int svc_pool_map_get_node(unsigned int pidx) | 285 | static int svc_pool_map_get_node(unsigned int pidx) |
303 | { | 286 | { |
@@ -423,7 +406,7 @@ EXPORT_SYMBOL_GPL(svc_bind); | |||
423 | */ | 406 | */ |
424 | static struct svc_serv * | 407 | static struct svc_serv * |
425 | __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, | 408 | __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, |
426 | void (*shutdown)(struct svc_serv *serv, struct net *net)) | 409 | struct svc_serv_ops *ops) |
427 | { | 410 | { |
428 | struct svc_serv *serv; | 411 | struct svc_serv *serv; |
429 | unsigned int vers; | 412 | unsigned int vers; |
@@ -440,7 +423,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, | |||
440 | bufsize = RPCSVC_MAXPAYLOAD; | 423 | bufsize = RPCSVC_MAXPAYLOAD; |
441 | serv->sv_max_payload = bufsize? bufsize : 4096; | 424 | serv->sv_max_payload = bufsize? bufsize : 4096; |
442 | serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE); | 425 | serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE); |
443 | serv->sv_shutdown = shutdown; | 426 | serv->sv_ops = ops; |
444 | xdrsize = 0; | 427 | xdrsize = 0; |
445 | while (prog) { | 428 | while (prog) { |
446 | prog->pg_lovers = prog->pg_nvers-1; | 429 | prog->pg_lovers = prog->pg_nvers-1; |
@@ -486,26 +469,22 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, | |||
486 | 469 | ||
487 | struct svc_serv * | 470 | struct svc_serv * |
488 | svc_create(struct svc_program *prog, unsigned int bufsize, | 471 | svc_create(struct svc_program *prog, unsigned int bufsize, |
489 | void (*shutdown)(struct svc_serv *serv, struct net *net)) | 472 | struct svc_serv_ops *ops) |
490 | { | 473 | { |
491 | return __svc_create(prog, bufsize, /*npools*/1, shutdown); | 474 | return __svc_create(prog, bufsize, /*npools*/1, ops); |
492 | } | 475 | } |
493 | EXPORT_SYMBOL_GPL(svc_create); | 476 | EXPORT_SYMBOL_GPL(svc_create); |
494 | 477 | ||
495 | struct svc_serv * | 478 | struct svc_serv * |
496 | svc_create_pooled(struct svc_program *prog, unsigned int bufsize, | 479 | svc_create_pooled(struct svc_program *prog, unsigned int bufsize, |
497 | void (*shutdown)(struct svc_serv *serv, struct net *net), | 480 | struct svc_serv_ops *ops) |
498 | svc_thread_fn func, struct module *mod) | ||
499 | { | 481 | { |
500 | struct svc_serv *serv; | 482 | struct svc_serv *serv; |
501 | unsigned int npools = svc_pool_map_get(); | 483 | unsigned int npools = svc_pool_map_get(); |
502 | 484 | ||
503 | serv = __svc_create(prog, bufsize, npools, shutdown); | 485 | serv = __svc_create(prog, bufsize, npools, ops); |
504 | if (!serv) | 486 | if (!serv) |
505 | goto out_err; | 487 | goto out_err; |
506 | |||
507 | serv->sv_function = func; | ||
508 | serv->sv_module = mod; | ||
509 | return serv; | 488 | return serv; |
510 | out_err: | 489 | out_err: |
511 | svc_pool_map_put(); | 490 | svc_pool_map_put(); |
@@ -517,8 +496,8 @@ void svc_shutdown_net(struct svc_serv *serv, struct net *net) | |||
517 | { | 496 | { |
518 | svc_close_net(serv, net); | 497 | svc_close_net(serv, net); |
519 | 498 | ||
520 | if (serv->sv_shutdown) | 499 | if (serv->sv_ops->svo_shutdown) |
521 | serv->sv_shutdown(serv, net); | 500 | serv->sv_ops->svo_shutdown(serv, net); |
522 | } | 501 | } |
523 | EXPORT_SYMBOL_GPL(svc_shutdown_net); | 502 | EXPORT_SYMBOL_GPL(svc_shutdown_net); |
524 | 503 | ||
@@ -604,40 +583,52 @@ svc_release_buffer(struct svc_rqst *rqstp) | |||
604 | } | 583 | } |
605 | 584 | ||
606 | struct svc_rqst * | 585 | struct svc_rqst * |
607 | svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) | 586 | svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node) |
608 | { | 587 | { |
609 | struct svc_rqst *rqstp; | 588 | struct svc_rqst *rqstp; |
610 | 589 | ||
611 | rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node); | 590 | rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node); |
612 | if (!rqstp) | 591 | if (!rqstp) |
613 | goto out_enomem; | 592 | return rqstp; |
614 | 593 | ||
615 | serv->sv_nrthreads++; | ||
616 | __set_bit(RQ_BUSY, &rqstp->rq_flags); | 594 | __set_bit(RQ_BUSY, &rqstp->rq_flags); |
617 | spin_lock_init(&rqstp->rq_lock); | 595 | spin_lock_init(&rqstp->rq_lock); |
618 | rqstp->rq_server = serv; | 596 | rqstp->rq_server = serv; |
619 | rqstp->rq_pool = pool; | 597 | rqstp->rq_pool = pool; |
620 | spin_lock_bh(&pool->sp_lock); | ||
621 | pool->sp_nrthreads++; | ||
622 | list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads); | ||
623 | spin_unlock_bh(&pool->sp_lock); | ||
624 | 598 | ||
625 | rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); | 599 | rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); |
626 | if (!rqstp->rq_argp) | 600 | if (!rqstp->rq_argp) |
627 | goto out_thread; | 601 | goto out_enomem; |
628 | 602 | ||
629 | rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); | 603 | rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); |
630 | if (!rqstp->rq_resp) | 604 | if (!rqstp->rq_resp) |
631 | goto out_thread; | 605 | goto out_enomem; |
632 | 606 | ||
633 | if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node)) | 607 | if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node)) |
634 | goto out_thread; | 608 | goto out_enomem; |
635 | 609 | ||
636 | return rqstp; | 610 | return rqstp; |
637 | out_thread: | ||
638 | svc_exit_thread(rqstp); | ||
639 | out_enomem: | 611 | out_enomem: |
640 | return ERR_PTR(-ENOMEM); | 612 | svc_rqst_free(rqstp); |
613 | return NULL; | ||
614 | } | ||
615 | EXPORT_SYMBOL_GPL(svc_rqst_alloc); | ||
616 | |||
617 | struct svc_rqst * | ||
618 | svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) | ||
619 | { | ||
620 | struct svc_rqst *rqstp; | ||
621 | |||
622 | rqstp = svc_rqst_alloc(serv, pool, node); | ||
623 | if (!rqstp) | ||
624 | return ERR_PTR(-ENOMEM); | ||
625 | |||
626 | serv->sv_nrthreads++; | ||
627 | spin_lock_bh(&pool->sp_lock); | ||
628 | pool->sp_nrthreads++; | ||
629 | list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads); | ||
630 | spin_unlock_bh(&pool->sp_lock); | ||
631 | return rqstp; | ||
641 | } | 632 | } |
642 | EXPORT_SYMBOL_GPL(svc_prepare_thread); | 633 | EXPORT_SYMBOL_GPL(svc_prepare_thread); |
643 | 634 | ||
@@ -739,12 +730,12 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) | |||
739 | break; | 730 | break; |
740 | } | 731 | } |
741 | 732 | ||
742 | __module_get(serv->sv_module); | 733 | __module_get(serv->sv_ops->svo_module); |
743 | task = kthread_create_on_node(serv->sv_function, rqstp, | 734 | task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp, |
744 | node, "%s", serv->sv_name); | 735 | node, "%s", serv->sv_name); |
745 | if (IS_ERR(task)) { | 736 | if (IS_ERR(task)) { |
746 | error = PTR_ERR(task); | 737 | error = PTR_ERR(task); |
747 | module_put(serv->sv_module); | 738 | module_put(serv->sv_ops->svo_module); |
748 | svc_exit_thread(rqstp); | 739 | svc_exit_thread(rqstp); |
749 | break; | 740 | break; |
750 | } | 741 | } |
@@ -772,15 +763,21 @@ EXPORT_SYMBOL_GPL(svc_set_num_threads); | |||
772 | * mutex" for the service. | 763 | * mutex" for the service. |
773 | */ | 764 | */ |
774 | void | 765 | void |
775 | svc_exit_thread(struct svc_rqst *rqstp) | 766 | svc_rqst_free(struct svc_rqst *rqstp) |
776 | { | 767 | { |
777 | struct svc_serv *serv = rqstp->rq_server; | ||
778 | struct svc_pool *pool = rqstp->rq_pool; | ||
779 | |||
780 | svc_release_buffer(rqstp); | 768 | svc_release_buffer(rqstp); |
781 | kfree(rqstp->rq_resp); | 769 | kfree(rqstp->rq_resp); |
782 | kfree(rqstp->rq_argp); | 770 | kfree(rqstp->rq_argp); |
783 | kfree(rqstp->rq_auth_data); | 771 | kfree(rqstp->rq_auth_data); |
772 | kfree_rcu(rqstp, rq_rcu_head); | ||
773 | } | ||
774 | EXPORT_SYMBOL_GPL(svc_rqst_free); | ||
775 | |||
776 | void | ||
777 | svc_exit_thread(struct svc_rqst *rqstp) | ||
778 | { | ||
779 | struct svc_serv *serv = rqstp->rq_server; | ||
780 | struct svc_pool *pool = rqstp->rq_pool; | ||
784 | 781 | ||
785 | spin_lock_bh(&pool->sp_lock); | 782 | spin_lock_bh(&pool->sp_lock); |
786 | pool->sp_nrthreads--; | 783 | pool->sp_nrthreads--; |
@@ -788,7 +785,7 @@ svc_exit_thread(struct svc_rqst *rqstp) | |||
788 | list_del_rcu(&rqstp->rq_all); | 785 | list_del_rcu(&rqstp->rq_all); |
789 | spin_unlock_bh(&pool->sp_lock); | 786 | spin_unlock_bh(&pool->sp_lock); |
790 | 787 | ||
791 | kfree_rcu(rqstp, rq_rcu_head); | 788 | svc_rqst_free(rqstp); |
792 | 789 | ||
793 | /* Release the server */ | 790 | /* Release the server */ |
794 | if (serv) | 791 | if (serv) |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 163ac45c3639..a6cbb2104667 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -24,7 +24,6 @@ static int svc_deferred_recv(struct svc_rqst *rqstp); | |||
24 | static struct cache_deferred_req *svc_defer(struct cache_req *req); | 24 | static struct cache_deferred_req *svc_defer(struct cache_req *req); |
25 | static void svc_age_temp_xprts(unsigned long closure); | 25 | static void svc_age_temp_xprts(unsigned long closure); |
26 | static void svc_delete_xprt(struct svc_xprt *xprt); | 26 | static void svc_delete_xprt(struct svc_xprt *xprt); |
27 | static void svc_xprt_do_enqueue(struct svc_xprt *xprt); | ||
28 | 27 | ||
29 | /* apparently the "standard" is that clients close | 28 | /* apparently the "standard" is that clients close |
30 | * idle connections after 5 minutes, servers after | 29 | * idle connections after 5 minutes, servers after |
@@ -225,12 +224,12 @@ static void svc_xprt_received(struct svc_xprt *xprt) | |||
225 | } | 224 | } |
226 | 225 | ||
227 | /* As soon as we clear busy, the xprt could be closed and | 226 | /* As soon as we clear busy, the xprt could be closed and |
228 | * 'put', so we need a reference to call svc_xprt_do_enqueue with: | 227 | * 'put', so we need a reference to call svc_enqueue_xprt with: |
229 | */ | 228 | */ |
230 | svc_xprt_get(xprt); | 229 | svc_xprt_get(xprt); |
231 | smp_mb__before_atomic(); | 230 | smp_mb__before_atomic(); |
232 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | 231 | clear_bit(XPT_BUSY, &xprt->xpt_flags); |
233 | svc_xprt_do_enqueue(xprt); | 232 | xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt); |
234 | svc_xprt_put(xprt); | 233 | svc_xprt_put(xprt); |
235 | } | 234 | } |
236 | 235 | ||
@@ -320,7 +319,7 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) | |||
320 | return false; | 319 | return false; |
321 | } | 320 | } |
322 | 321 | ||
323 | static void svc_xprt_do_enqueue(struct svc_xprt *xprt) | 322 | void svc_xprt_do_enqueue(struct svc_xprt *xprt) |
324 | { | 323 | { |
325 | struct svc_pool *pool; | 324 | struct svc_pool *pool; |
326 | struct svc_rqst *rqstp = NULL; | 325 | struct svc_rqst *rqstp = NULL; |
@@ -402,6 +401,7 @@ redo_search: | |||
402 | out: | 401 | out: |
403 | trace_svc_xprt_do_enqueue(xprt, rqstp); | 402 | trace_svc_xprt_do_enqueue(xprt, rqstp); |
404 | } | 403 | } |
404 | EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); | ||
405 | 405 | ||
406 | /* | 406 | /* |
407 | * Queue up a transport with data pending. If there are idle nfsd | 407 | * Queue up a transport with data pending. If there are idle nfsd |
@@ -412,7 +412,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
412 | { | 412 | { |
413 | if (test_bit(XPT_BUSY, &xprt->xpt_flags)) | 413 | if (test_bit(XPT_BUSY, &xprt->xpt_flags)) |
414 | return; | 414 | return; |
415 | svc_xprt_do_enqueue(xprt); | 415 | xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt); |
416 | } | 416 | } |
417 | EXPORT_SYMBOL_GPL(svc_xprt_enqueue); | 417 | EXPORT_SYMBOL_GPL(svc_xprt_enqueue); |
418 | 418 | ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index d25cd430f9ff..1dfae8317065 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -136,6 +136,79 @@ static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt, | |||
136 | return dma_addr; | 136 | return dma_addr; |
137 | } | 137 | } |
138 | 138 | ||
139 | /* Returns the address of the first read chunk or <nul> if no read chunk | ||
140 | * is present | ||
141 | */ | ||
142 | struct rpcrdma_read_chunk * | ||
143 | svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp) | ||
144 | { | ||
145 | struct rpcrdma_read_chunk *ch = | ||
146 | (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; | ||
147 | |||
148 | if (ch->rc_discrim == xdr_zero) | ||
149 | return NULL; | ||
150 | return ch; | ||
151 | } | ||
152 | |||
153 | /* Returns the address of the first write array element or <nul> | ||
154 | * if no write array list is present | ||
155 | */ | ||
156 | static struct rpcrdma_write_array * | ||
157 | svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp) | ||
158 | { | ||
159 | if (rmsgp->rm_body.rm_chunks[0] != xdr_zero || | ||
160 | rmsgp->rm_body.rm_chunks[1] == xdr_zero) | ||
161 | return NULL; | ||
162 | return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1]; | ||
163 | } | ||
164 | |||
165 | /* Returns the address of the first reply array element or <nul> if no | ||
166 | * reply array is present | ||
167 | */ | ||
168 | static struct rpcrdma_write_array * | ||
169 | svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp) | ||
170 | { | ||
171 | struct rpcrdma_read_chunk *rch; | ||
172 | struct rpcrdma_write_array *wr_ary; | ||
173 | struct rpcrdma_write_array *rp_ary; | ||
174 | |||
175 | /* XXX: Need to fix when reply chunk may occur with read list | ||
176 | * and/or write list. | ||
177 | */ | ||
178 | if (rmsgp->rm_body.rm_chunks[0] != xdr_zero || | ||
179 | rmsgp->rm_body.rm_chunks[1] != xdr_zero) | ||
180 | return NULL; | ||
181 | |||
182 | rch = svc_rdma_get_read_chunk(rmsgp); | ||
183 | if (rch) { | ||
184 | while (rch->rc_discrim != xdr_zero) | ||
185 | rch++; | ||
186 | |||
187 | /* The reply chunk follows an empty write array located | ||
188 | * at 'rc_position' here. The reply array is at rc_target. | ||
189 | */ | ||
190 | rp_ary = (struct rpcrdma_write_array *)&rch->rc_target; | ||
191 | goto found_it; | ||
192 | } | ||
193 | |||
194 | wr_ary = svc_rdma_get_write_array(rmsgp); | ||
195 | if (wr_ary) { | ||
196 | int chunk = be32_to_cpu(wr_ary->wc_nchunks); | ||
197 | |||
198 | rp_ary = (struct rpcrdma_write_array *) | ||
199 | &wr_ary->wc_array[chunk].wc_target.rs_length; | ||
200 | goto found_it; | ||
201 | } | ||
202 | |||
203 | /* No read list, no write list */ | ||
204 | rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2]; | ||
205 | |||
206 | found_it: | ||
207 | if (rp_ary->wc_discrim == xdr_zero) | ||
208 | return NULL; | ||
209 | return rp_ary; | ||
210 | } | ||
211 | |||
139 | /* Assumptions: | 212 | /* Assumptions: |
140 | * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE | 213 | * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE |
141 | */ | 214 | */ |
@@ -384,6 +457,7 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
384 | int byte_count) | 457 | int byte_count) |
385 | { | 458 | { |
386 | struct ib_send_wr send_wr; | 459 | struct ib_send_wr send_wr; |
460 | u32 xdr_off; | ||
387 | int sge_no; | 461 | int sge_no; |
388 | int sge_bytes; | 462 | int sge_bytes; |
389 | int page_no; | 463 | int page_no; |
@@ -418,8 +492,8 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
418 | ctxt->direction = DMA_TO_DEVICE; | 492 | ctxt->direction = DMA_TO_DEVICE; |
419 | 493 | ||
420 | /* Map the payload indicated by 'byte_count' */ | 494 | /* Map the payload indicated by 'byte_count' */ |
495 | xdr_off = 0; | ||
421 | for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) { | 496 | for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) { |
422 | int xdr_off = 0; | ||
423 | sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count); | 497 | sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count); |
424 | byte_count -= sge_bytes; | 498 | byte_count -= sge_bytes; |
425 | ctxt->sge[sge_no].addr = | 499 | ctxt->sge[sge_no].addr = |
@@ -457,6 +531,13 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
457 | } | 531 | } |
458 | rqstp->rq_next_page = rqstp->rq_respages + 1; | 532 | rqstp->rq_next_page = rqstp->rq_respages + 1; |
459 | 533 | ||
534 | /* The loop above bumps sc_dma_used for each sge. The | ||
535 | * xdr_buf.tail gets a separate sge, but resides in the | ||
536 | * same page as xdr_buf.head. Don't count it twice. | ||
537 | */ | ||
538 | if (sge_no > ctxt->count) | ||
539 | atomic_dec(&rdma->sc_dma_used); | ||
540 | |||
460 | if (sge_no > rdma->sc_max_sge) { | 541 | if (sge_no > rdma->sc_max_sge) { |
461 | pr_err("svcrdma: Too many sges (%d)\n", sge_no); | 542 | pr_err("svcrdma: Too many sges (%d)\n", sge_no); |
462 | goto err; | 543 | goto err; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 6b36279e4288..21e40365042c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -91,7 +91,7 @@ struct svc_xprt_class svc_rdma_class = { | |||
91 | .xcl_name = "rdma", | 91 | .xcl_name = "rdma", |
92 | .xcl_owner = THIS_MODULE, | 92 | .xcl_owner = THIS_MODULE, |
93 | .xcl_ops = &svc_rdma_ops, | 93 | .xcl_ops = &svc_rdma_ops, |
94 | .xcl_max_payload = RPCRDMA_MAXPAYLOAD, | 94 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA, |
95 | .xcl_ident = XPRT_TRANSPORT_RDMA, | 95 | .xcl_ident = XPRT_TRANSPORT_RDMA, |
96 | }; | 96 | }; |
97 | 97 | ||
@@ -659,6 +659,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id, | |||
659 | if (xprt) { | 659 | if (xprt) { |
660 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 660 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
661 | svc_xprt_enqueue(xprt); | 661 | svc_xprt_enqueue(xprt); |
662 | svc_xprt_put(xprt); | ||
662 | } | 663 | } |
663 | break; | 664 | break; |
664 | default: | 665 | default: |
@@ -1201,40 +1202,6 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp) | |||
1201 | return 1; | 1202 | return 1; |
1202 | } | 1203 | } |
1203 | 1204 | ||
1204 | /* | ||
1205 | * Attempt to register the kvec representing the RPC memory with the | ||
1206 | * device. | ||
1207 | * | ||
1208 | * Returns: | ||
1209 | * NULL : The device does not support fastreg or there were no more | ||
1210 | * fastreg mr. | ||
1211 | * frmr : The kvec register request was successfully posted. | ||
1212 | * <0 : An error was encountered attempting to register the kvec. | ||
1213 | */ | ||
1214 | int svc_rdma_fastreg(struct svcxprt_rdma *xprt, | ||
1215 | struct svc_rdma_fastreg_mr *frmr) | ||
1216 | { | ||
1217 | struct ib_send_wr fastreg_wr; | ||
1218 | u8 key; | ||
1219 | |||
1220 | /* Bump the key */ | ||
1221 | key = (u8)(frmr->mr->lkey & 0x000000FF); | ||
1222 | ib_update_fast_reg_key(frmr->mr, ++key); | ||
1223 | |||
1224 | /* Prepare FASTREG WR */ | ||
1225 | memset(&fastreg_wr, 0, sizeof fastreg_wr); | ||
1226 | fastreg_wr.opcode = IB_WR_FAST_REG_MR; | ||
1227 | fastreg_wr.send_flags = IB_SEND_SIGNALED; | ||
1228 | fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva; | ||
1229 | fastreg_wr.wr.fast_reg.page_list = frmr->page_list; | ||
1230 | fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len; | ||
1231 | fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | ||
1232 | fastreg_wr.wr.fast_reg.length = frmr->map_len; | ||
1233 | fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags; | ||
1234 | fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey; | ||
1235 | return svc_rdma_send(xprt, &fastreg_wr); | ||
1236 | } | ||
1237 | |||
1238 | int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) | 1205 | int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) |
1239 | { | 1206 | { |
1240 | struct ib_send_wr *bad_wr, *n_wr; | 1207 | struct ib_send_wr *bad_wr, *n_wr; |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index f49dd8b38122..e718d0959af3 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -51,7 +51,6 @@ | |||
51 | #include <linux/sunrpc/clnt.h> /* rpc_xprt */ | 51 | #include <linux/sunrpc/clnt.h> /* rpc_xprt */ |
52 | #include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */ | 52 | #include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */ |
53 | #include <linux/sunrpc/xprtrdma.h> /* xprt parameters */ | 53 | #include <linux/sunrpc/xprtrdma.h> /* xprt parameters */ |
54 | #include <linux/sunrpc/svc.h> /* RPCSVC_MAXPAYLOAD */ | ||
55 | 54 | ||
56 | #define RDMA_RESOLVE_TIMEOUT (5000) /* 5 seconds */ | 55 | #define RDMA_RESOLVE_TIMEOUT (5000) /* 5 seconds */ |
57 | #define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */ | 56 | #define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */ |