author    Mike Snitzer <snitzer@redhat.com>  2017-02-16 23:57:17 -0500
committer Mike Snitzer <snitzer@redhat.com>  2017-02-17 00:54:09 -0500
commit    37a098e9d10db6e2efc05fe61e3a6ff2e9802c53 (patch)
tree      3800aa0a250b90b0325476a7b5ab1680dda5aac7 /drivers/md
parent    6085831883c25860264721df15f05bbded45e2a2 (diff)
dm round robin: revert "use percpu 'repeat_count' and 'current_path'"
The sloppy nature of lockless access to percpu pointers
(s->current_path) in rr_select_path(), from multiple threads, is
causing some paths to be used more than others -- which results in less
IO performance being observed.

Revert these upstream commits to restore truly symmetric round-robin IO
submission in DM multipath:

b0b477c dm round robin: use percpu 'repeat_count' and 'current_path'
802934b dm round robin: do not use this_cpu_ptr() without having preemption disabled

There is no benefit to all this complexity if repeat_count = 1 (which
is the recommended default).

Cc: stable@vger.kernel.org # 4.6+
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
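For illustration only (not part of the patch): below is a minimal userspace C sketch of the symmetric selection this revert restores. Every selection takes the selector lock, hands out the head of the valid-path list, and rotates it to the back, so N paths each receive 1/N of the selections regardless of which thread submits the IO. The names here (struct selector, rr_select, the "sda"/"sdb"/"sdc" paths) are hypothetical and only loosely mirror the driver.

/*
 * Userspace sketch, not kernel code: strict round robin over a ring of
 * paths, serialized by a mutex in place of the driver's spinlock.
 */
#include <pthread.h>
#include <stdio.h>

struct path_info {
	const char *name;
	struct path_info *next;	/* circular list of valid paths */
};

struct selector {
	pthread_mutex_t lock;
	struct path_info *head;
};

/* Return the current head and advance it, all under the lock. */
static const char *rr_select(struct selector *s)
{
	const char *name = NULL;

	pthread_mutex_lock(&s->lock);
	if (s->head) {
		name = s->head->name;
		s->head = s->head->next;	/* old head effectively moves to the tail */
	}
	pthread_mutex_unlock(&s->lock);
	return name;
}

int main(void)
{
	struct path_info c = { "sdc", NULL }, b = { "sdb", &c }, a = { "sda", &b };
	struct selector s = { PTHREAD_MUTEX_INITIALIZER, &a };
	int i;

	c.next = &a;	/* close the ring: sda -> sdb -> sdc -> sda */

	for (i = 0; i < 6; i++)	/* prints: sda sdb sdc sda sdb sdc */
		printf("%s ", rr_select(&s));
	printf("\n");
	return 0;
}

With repeat_count fixed at 1, this per-selection rotation is all the selector needs; the reverted per-CPU state only paid off for repeat_count > 1.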
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-round-robin.c  67
1 file changed, 14 insertions(+), 53 deletions(-)
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 6c25213ab38c..bdbb7e6e8212 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -17,8 +17,8 @@
 #include <linux/module.h>
 
 #define DM_MSG_PREFIX "multipath round-robin"
-#define RR_MIN_IO	1000
-#define RR_VERSION	"1.1.0"
+#define RR_MIN_IO	1
+#define RR_VERSION	"1.2.0"
 
 /*-----------------------------------------------------------------
  * Path-handling code, paths are held in lists
@@ -47,44 +47,19 @@ struct selector {
 	struct list_head valid_paths;
 	struct list_head invalid_paths;
 	spinlock_t lock;
-	struct dm_path * __percpu *current_path;
-	struct percpu_counter repeat_count;
 };
 
-static void set_percpu_current_path(struct selector *s, struct dm_path *path)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(s->current_path, cpu) = path;
-}
-
 static struct selector *alloc_selector(void)
 {
 	struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
 
-	if (!s)
-		return NULL;
-
-	INIT_LIST_HEAD(&s->valid_paths);
-	INIT_LIST_HEAD(&s->invalid_paths);
-	spin_lock_init(&s->lock);
-
-	s->current_path = alloc_percpu(struct dm_path *);
-	if (!s->current_path)
-		goto out_current_path;
-	set_percpu_current_path(s, NULL);
-
-	if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL))
-		goto out_repeat_count;
+	if (s) {
+		INIT_LIST_HEAD(&s->valid_paths);
+		INIT_LIST_HEAD(&s->invalid_paths);
+		spin_lock_init(&s->lock);
+	}
 
 	return s;
-
-out_repeat_count:
-	free_percpu(s->current_path);
-out_current_path:
-	kfree(s);
-	return NULL;;
 }
 
 static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
@@ -105,8 +80,6 @@ static void rr_destroy(struct path_selector *ps)
 
 	free_paths(&s->valid_paths);
 	free_paths(&s->invalid_paths);
-	free_percpu(s->current_path);
-	percpu_counter_destroy(&s->repeat_count);
 	kfree(s);
 	ps->context = NULL;
 }
@@ -157,6 +130,11 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
 		return -EINVAL;
 	}
 
+	if (repeat_count > 1) {
+		DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
+		repeat_count = 1;
+	}
+
 	/* allocate the path */
 	pi = kmalloc(sizeof(*pi), GFP_KERNEL);
 	if (!pi) {
@@ -183,9 +161,6 @@ static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
 	struct path_info *pi = p->pscontext;
 
 	spin_lock_irqsave(&s->lock, flags);
-	if (p == *this_cpu_ptr(s->current_path))
-		set_percpu_current_path(s, NULL);
-
 	list_move(&pi->list, &s->invalid_paths);
 	spin_unlock_irqrestore(&s->lock, flags);
 }
@@ -208,29 +183,15 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
 	unsigned long flags;
 	struct selector *s = ps->context;
 	struct path_info *pi = NULL;
-	struct dm_path *current_path = NULL;
-
-	local_irq_save(flags);
-	current_path = *this_cpu_ptr(s->current_path);
-	if (current_path) {
-		percpu_counter_dec(&s->repeat_count);
-		if (percpu_counter_read_positive(&s->repeat_count) > 0) {
-			local_irq_restore(flags);
-			return current_path;
-		}
-	}
 
-	spin_lock(&s->lock);
+	spin_lock_irqsave(&s->lock, flags);
 	if (!list_empty(&s->valid_paths)) {
 		pi = list_entry(s->valid_paths.next, struct path_info, list);
 		list_move_tail(&pi->list, &s->valid_paths);
-		percpu_counter_set(&s->repeat_count, pi->repeat_count);
-		set_percpu_current_path(s, pi->path);
-		current_path = pi->path;
 	}
 	spin_unlock_irqrestore(&s->lock, flags);
 
-	return current_path;
+	return pi ? pi->path : NULL;
 }
 
 static struct path_selector_type rr_ps = {