author		Chris Mason <chris.mason@oracle.com>	2010-06-11 14:17:59 -0400
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:15:28 -0400
commit		6fa70da6081bbcf948801fd5ee0be4d222298a43 (patch)
tree		519fef6c63abe15d80173ad25c7cd5aae673df55 /net/rds/xlist.h
parent		0f4b1c7e89e699f588807a914ec6e6396c851a72 (diff)
rds: recycle FMRs through lockless lists
FMR allocation and recycling is performance critical and fairly lock intensive. The current code has a per-connection lock that all processes bang on, and it becomes a major bottleneck on large systems.

This changes things to use a number of cmpxchg-based lists instead, allowing us to go through the whole FMR lifecycle without locking inside RDS.

Zach Brown pointed out that our usage of cmpxchg for xlist removal is racy if someone manages to remove and re-add an FMR struct to the list while another CPU still sees that FMR's address at the head of the list. The second CPU might assume the list hasn't changed when in fact any number of operations might have happened between the deletion and reinsertion.

This commit maintains a per-CPU count of CPUs that are currently in xlist removal, and establishes a grace period to make sure that nobody can see an entry we have just removed from the list.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
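The grace period itself lives in the xlist callers rather than in xlist.h. The following is a condensed sketch of the scheme as the commit applies it in net/rds/ib_rdma.c; the names and details are approximate, and the wrapper function is purely illustrative, not a verbatim copy of the commit:

#include <linux/percpu.h>
#include <linux/bitops.h>

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * Spin until no CPU is inside an xlist removal section, so that a
 * just-removed entry can be reused or reinserted without another
 * CPU still holding a stale pointer to it.
 */
static void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

/*
 * Removal sections bracket xlist_del_head() with the per-cpu busy
 * bit (hypothetical wrapper for illustration; the commit open-codes
 * this in its callers).
 */
static struct xlist_head *grace_protected_del_head(struct xlist_head *head)
{
	struct xlist_head *ret;
	unsigned long *flag;

	preempt_disable();
	flag = &__get_cpu_var(clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);

	ret = xlist_del_head(head);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ret;
}

A path that wants to put a removed entry back on a list first calls wait_clean_list_grace(), so the ABA window described above is closed before the entry's address can reappear at the head of the list.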
Diffstat (limited to 'net/rds/xlist.h')
-rw-r--r--	net/rds/xlist.h	110
1 file changed, 110 insertions, 0 deletions
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
new file mode 100644
index 000000000000..8c21aca49d50
--- /dev/null
+++ b/net/rds/xlist.h
@@ -0,0 +1,110 @@
#ifndef _LINUX_XLIST_H
#define _LINUX_XLIST_H

#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/prefetch.h>
#include <asm/system.h>

struct xlist_head {
	struct xlist_head *next;
};

/*
 * XLIST_PTR_TAIL can be used to prevent double insertion.  See
 * xlist_protect().
 */
#define XLIST_PTR_TAIL ((struct xlist_head *)0x1)

/*
 * Atomically push the chain from @new to @tail onto the front of the
 * list at @head, retrying until the cmpxchg on head->next succeeds.
 * For a single entry, pass it as both @new and @tail.
 */
static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
			     struct xlist_head *head)
{
	struct xlist_head *cur;
	struct xlist_head *check;

	while (1) {
		cur = head->next;
		tail->next = cur;
		check = cmpxchg(&head->next, cur, new);
		if (check == cur)
			break;
	}
}

/*
 * To avoid duplicate insertion of the same xlist item by two CPUs,
 * you can call xlist_protect().  It stuffs XLIST_PTR_TAIL into the
 * entry->next pointer with xchg, and only returns 1 if a NULL was
 * there before.
 *
 * If xlist_protect() returns zero, someone else is busy working on
 * this entry.  Getting a NULL into the entry in a race-free manner
 * is the caller's job.  (A usage sketch follows after this diff.)
 */
static inline int xlist_protect(struct xlist_head *entry)
{
	struct xlist_head *val;

	val = xchg(&entry->next, XLIST_PTR_TAIL);
	if (val == NULL)
		return 1;
	return 0;
}

/*
 * Atomically pop the first entry with cmpxchg.  On its own this is
 * vulnerable to the ABA race described in the commit message (an
 * entry removed and reinserted while we hold a stale cur/next pair),
 * so callers guard it with the per-cpu removal count and grace
 * period.
 */
static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
{
	struct xlist_head *cur;
	struct xlist_head *check;
	struct xlist_head *next;

	while (1) {
		cur = head->next;
		if (!cur)
			goto out;

		if (cur == XLIST_PTR_TAIL) {
			cur = NULL;
			goto out;
		}

		next = cur->next;
		check = cmpxchg(&head->next, cur, next);
		if (check == cur)
			goto out;
	}
out:
	return cur;
}

/*
 * Non-atomic pop: only safe when no other CPU can be modifying the
 * list, e.g. after the list has been spliced onto a private head.
 */
static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
{
	struct xlist_head *cur;

	cur = head->next;
	if (!cur || cur == XLIST_PTR_TAIL)
		return NULL;

	head->next = cur->next;
	return cur;
}

/*
 * Atomically steal the entire contents of @list with xchg and hang
 * them off @head.  The caller must have exclusive access to @head,
 * which is expected to be empty.
 */
static inline void xlist_splice(struct xlist_head *list,
				struct xlist_head *head)
{
	struct xlist_head *cur;

	WARN_ON(head->next);
	cur = xchg(&list->next, NULL);
	head->next = cur;
}

static inline void INIT_XLIST_HEAD(struct xlist_head *list)
{
	list->next = NULL;
}

static inline int xlist_empty(struct xlist_head *head)
{
	return head->next == NULL || head->next == XLIST_PTR_TAIL;
}

#endif
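To make the xlist_protect() contract above concrete, here is a hypothetical caller (queue_for_recycle is illustrative and not part of this commit): an entry is only inserted by the CPU that wins the NULL -> XLIST_PTR_TAIL transition, and xlist_add() then overwrites the marker with the real next pointer.

static void queue_for_recycle(struct xlist_head *entry,
			      struct xlist_head *list)
{
	/*
	 * entry->next must have been reset to NULL (INIT_XLIST_HEAD)
	 * when the entry last left a list; only one CPU sees the
	 * NULL and wins the right to insert.
	 */
	if (!xlist_protect(entry))
		return;		/* another CPU is already inserting it */

	/* single entry: it is both the new head and the new tail */
	xlist_add(entry, entry, list);
}

If xlist_protect() returns zero the loser must not touch entry->next, since the winning CPU's xlist_add() may already have linked the entry into the list.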