author	Mike Snitzer <snitzer@redhat.com>	2016-03-17 17:10:15 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2016-05-05 15:25:51 -0400
commit	91e968aa6015d7366281b532dad2e48855b91fe3
tree	04dc336330a587a11ae3870d201225f43b3cb012
parent	518257b13276d07a19e6ae0608b8e5ee73383ce4
dm mpath: use atomic_t for counting members of 'struct multipath'
The use of atomic_t for nr_valid_paths, pg_init_in_progress and
pg_init_count will allow relaxing the use of the m->lock spinlock.
Suggested-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
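The pattern applied throughout the patch is sketched below. This is a minimal, self-contained illustration rather than dm-mpath code: struct foo and its helpers are hypothetical, while atomic_set(), atomic_inc_return() and atomic_dec_return() are the stock <linux/atomic.h> operations the patch relies on.

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

/* Hypothetical example structure -- not part of dm-mpath. */
struct foo {
	spinlock_t lock;	/* still guards the non-counter state */
	atomic_t nr_active;	/* was: "unsigned nr_active" under lock */
};

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	atomic_set(&f->nr_active, 0);	/* as alloc_multipath() now does */
}

static void foo_path_up(struct foo *f)
{
	/*
	 * atomic_inc_return() folds the increment and the "did we go
	 * 0 -> 1?" test into a single atomic step, so no spinlock is
	 * needed to keep them consistent.
	 */
	if (atomic_inc_return(&f->nr_active) == 1)
		pr_info("first path became active\n");
}

static void foo_path_down(struct foo *f)
{
	/* True while other paths remain active. */
	if (atomic_dec_return(&f->nr_active) > 0)
		return;
	pr_info("no active paths left\n");
}

The key point is that the atomic_*_return() variants combine the modification and the test into one step, so m->lock no longer has to make the two appear atomic together.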
-rw-r--r--	drivers/md/dm-mpath.c	61
1 file changed, 33 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 598d4a1123e0..780e5d0a066f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -76,9 +76,6 @@ struct multipath {
 
 	wait_queue_head_t pg_init_wait;		/* Wait for pg_init completion */
 
-	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
-
-	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
 	struct priority_group *current_pg;
 	struct priority_group *next_pg;	/* Switch to this PG if set */
@@ -86,9 +83,12 @@ struct multipath {
 	unsigned long flags;		/* Multipath state flags */
 
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
-	unsigned pg_init_count;		/* Number of times pg_init called */
 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
 
+	atomic_t nr_valid_paths;	/* Total number of usable paths */
+	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
+	atomic_t pg_init_count;		/* Number of times pg_init called */
+
 	struct work_struct trigger_event;
 
 	/*
@@ -195,6 +195,9 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
 	INIT_LIST_HEAD(&m->priority_groups);
 	spin_lock_init(&m->lock);
 	set_bit(MPATHF_QUEUE_IO, &m->flags);
+	atomic_set(&m->nr_valid_paths, 0);
+	atomic_set(&m->pg_init_in_progress, 0);
+	atomic_set(&m->pg_init_count, 0);
 	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 	INIT_WORK(&m->trigger_event, trigger_event);
 	init_waitqueue_head(&m->pg_init_wait);
@@ -279,10 +282,10 @@ static int __pg_init_all_paths(struct multipath *m)
 	struct pgpath *pgpath;
 	unsigned long pg_init_delay = 0;
 
-	if (m->pg_init_in_progress || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
+	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 		return 0;
 
-	m->pg_init_count++;
+	atomic_inc(&m->pg_init_count);
 	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 
 	/* Check here to reset pg_init_required */
@@ -298,9 +301,9 @@ static int __pg_init_all_paths(struct multipath *m)
 			continue;
 		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 				       pg_init_delay))
-			m->pg_init_in_progress++;
+			atomic_inc(&m->pg_init_in_progress);
 	}
-	return m->pg_init_in_progress;
+	return atomic_read(&m->pg_init_in_progress);
 }
 
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
@@ -316,7 +319,7 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 	}
 
-	m->pg_init_count = 0;
+	atomic_set(&m->pg_init_count, 0);
 }
 
 static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
@@ -341,7 +344,7 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
 	struct priority_group *pg;
 	bool bypassed = true;
 
-	if (!m->nr_valid_paths) {
+	if (!atomic_read(&m->nr_valid_paths)) {
 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
@@ -902,6 +905,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 	/* parse the priority groups */
 	while (as.argc) {
 		struct priority_group *pg;
+		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
 
 		pg = parse_priority_group(&as, m);
 		if (IS_ERR(pg)) {
@@ -909,7 +913,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 			goto bad;
 		}
 
-		m->nr_valid_paths += pg->nr_pgpaths;
+		nr_valid_paths += pg->nr_pgpaths;
+		atomic_set(&m->nr_valid_paths, nr_valid_paths);
+
 		list_add_tail(&pg->list, &m->priority_groups);
 		pg_count++;
 		pg->pg_num = pg_count;
@@ -939,19 +945,14 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 static void multipath_wait_for_pg_init_completion(struct multipath *m)
 {
 	DECLARE_WAITQUEUE(wait, current);
-	unsigned long flags;
 
 	add_wait_queue(&m->pg_init_wait, &wait);
 
 	while (1) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		spin_lock_irqsave(&m->lock, flags);
-		if (!m->pg_init_in_progress) {
-			spin_unlock_irqrestore(&m->lock, flags);
+		if (!atomic_read(&m->pg_init_in_progress))
 			break;
-		}
-		spin_unlock_irqrestore(&m->lock, flags);
 
 		io_schedule();
 	}
@@ -1001,13 +1002,13 @@ static int fail_path(struct pgpath *pgpath)
 	pgpath->is_active = false;
 	pgpath->fail_count++;
 
-	m->nr_valid_paths--;
+	atomic_dec(&m->nr_valid_paths);
 
 	if (pgpath == m->current_pgpath)
 		m->current_pgpath = NULL;
 
 	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
-		       pgpath->path.dev->name, m->nr_valid_paths);
+		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
 
 	schedule_work(&m->trigger_event);
 
@@ -1025,6 +1026,7 @@ static int reinstate_path(struct pgpath *pgpath)
 	int r = 0, run_queue = 0;
 	unsigned long flags;
 	struct multipath *m = pgpath->pg->m;
+	unsigned nr_valid_paths;
 
 	spin_lock_irqsave(&m->lock, flags);
 
@@ -1039,16 +1041,17 @@ static int reinstate_path(struct pgpath *pgpath)
 
 	pgpath->is_active = true;
 
-	if (!m->nr_valid_paths++) {
+	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
+	if (nr_valid_paths == 1) {
 		m->current_pgpath = NULL;
 		run_queue = 1;
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
-			m->pg_init_in_progress++;
+			atomic_inc(&m->pg_init_in_progress);
 	}
 
 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
-		       pgpath->path.dev->name, m->nr_valid_paths);
+		       pgpath->path.dev->name, nr_valid_paths);
 
 	schedule_work(&m->trigger_event);
 
@@ -1166,7 +1169,8 @@ static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
 
 	spin_lock_irqsave(&m->lock, flags);
 
-	if (m->pg_init_count <= m->pg_init_retries && !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
+	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
+	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 	else
 		limit_reached = true;
@@ -1236,7 +1240,7 @@ static void pg_init_done(void *data, int errors)
 	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
 		pg->bypassed = false;
 
-	if (--m->pg_init_in_progress)
+	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
 		/* Activations of other paths are still on going */
 		goto out;
 
@@ -1317,7 +1321,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
 		fail_path(mpio->pgpath);
 
 	spin_lock_irqsave(&m->lock, flags);
-	if (!m->nr_valid_paths) {
+	if (!atomic_read(&m->nr_valid_paths)) {
 		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 			if (!__must_push_back(m))
 				r = -EIO;
@@ -1421,7 +1425,8 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 
 	/* Features */
 	if (type == STATUSTYPE_INFO)
-		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags), m->pg_init_count);
+		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
+		       atomic_read(&m->pg_init_count));
 	else {
 		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
 			      (m->pg_init_retries > 0) * 2 +
@@ -1675,8 +1680,8 @@ static int multipath_busy(struct dm_target *ti)
 	spin_lock_irqsave(&m->lock, flags);
 
 	/* pg_init in progress or no paths available */
-	if (m->pg_init_in_progress ||
-	    (!m->nr_valid_paths && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
+	if (atomic_read(&m->pg_init_in_progress) ||
+	    (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 		busy = true;
 		goto out;
 	}
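Two conversions in this diff are easy to misread because of C's increment/decrement semantics, so the equivalences are spelled out in the hedged sketch below (v is a free-standing atomic_t for illustration, not a multipath member).

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t v = ATOMIC_INIT(0);

/*
 * Replaces "if (!m->nr_valid_paths++)" in reinstate_path(): the old
 * post-increment test fired only when the counter was 0, i.e. on the
 * 0 -> 1 transition, which is exactly atomic_inc_return() == 1.
 */
static bool became_first_valid_path(void)
{
	return atomic_inc_return(&v) == 1;
}

/*
 * Replaces "if (--m->pg_init_in_progress)" in pg_init_done(): the old
 * pre-decrement test stayed true while the count remained above zero,
 * i.e. while activations of other paths were still in flight.
 */
static bool other_activations_pending(void)
{
	return atomic_dec_return(&v) > 0;
}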