author    | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-20 16:05:25 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-20 16:05:25 -0500
commit    | 6d6e352c80f22c446d933ca8103e02bac1f09129 (patch)
tree      | 248a6a7ebc5ea95986da5bccdd6d75b255cf28e4 /drivers/md/raid5.h
parent    | b4789b8e6be3151a955ade74872822f30e8cd914 (diff)
parent    | 60aaf933854511630e16be4efe0f96485e132de4 (diff)
Merge tag 'md/3.13' of git://neil.brown.name/md
Pull md update from Neil Brown:
"Mostly optimisations and obscure bug fixes.
- raid5 gets less lock contention
- raid1 gets less contention between normal-io and resync-io during
resync"
* tag 'md/3.13' of git://neil.brown.name/md:
md/raid5: Use conf->device_lock to protect changes to multi-thread resources.
md/raid5: Flush old multi-thread workers before freeing them.
md/raid5: For stripes with R5_ReadNoMerge, replace REQ_FLUSH with REQ_NOMERGE.
UAPI: include <asm/byteorder.h> in linux/raid/md_p.h
raid1: Rewrite the implementation of iobarrier.
raid1: Add some macros to make the code clearer.
raid1: Replace raise_barrier/lower_barrier with freeze_array/unfreeze_array when reconfiguring the array.
raid1: Add a field, array_frozen, to indicate whether the array is frozen.
md: Convert use of typedef ctl_table to struct ctl_table
md/raid5: avoid deadlock when raid5 array has unack badblocks during md_stop_writes.
md: use MD_RECOVERY_INTR instead of kthread_should_stop in resync thread.
md: fix some places where mddev_lock return value is not checked.
raid5: Retry with the R5_ReadNoMerge flag when a read error is hit.
raid5: relieve lock contention in get_active_stripe()
raid5: relieve lock contention in get_active_stripe()
wait: add wait_event_cmd()
md/raid5.c: add proper locking to error path of raid5_start_reshape.
md: fix calculation of stacking limits on level change.
raid5: Use slow_path to release stripe when mddev->thread is null
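Among these commits, wait_event_cmd() is the enabling primitive: it behaves like wait_event() but runs one statement before each sleep and another after each wake-up, so a caller can drop a bucket lock while it waits. A sketch of the kind of call site this enables; the field names follow the raid5.h diff below, but the condition is simplified and this is not the merged raid5.c code:

/* Sleep until this hash bucket has an inactive stripe, releasing the
 * bucket's spinlock across the sleep.  wait_event_cmd(wq, cond, cmd1,
 * cmd2) runs cmd1 before each schedule() and cmd2 after each wake-up.
 * Simplified: the real condition in get_active_stripe() is stricter.
 */
wait_event_cmd(conf->wait_for_stripe,
	       !list_empty(conf->inactive_list + hash),
	       spin_unlock_irq(conf->hash_locks + hash), /* before sleeping */
	       spin_lock_irq(conf->hash_locks + hash));  /* after waking */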
Diffstat (limited to 'drivers/md/raid5.h')
-rw-r--r-- | drivers/md/raid5.h | 16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b42e6b462eda..01ad8ae8f578 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -205,6 +205,7 @@ struct stripe_head {
 	short		pd_idx;		/* parity disk index */
 	short		qd_idx;		/* 'Q' disk index for raid6 */
 	short		ddf_layout;	/* use DDF ordering to calculate Q */
+	short		hash_lock_index;
 	unsigned long	state;		/* state flags */
 	atomic_t	count;		/* nr of active thread/requests */
 	int		bm_seq;		/* sequence number for bitmap flushes */
@@ -367,9 +368,18 @@ struct disk_info {
 	struct md_rdev	*rdev, *replacement;
 };
 
+/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
+ * This is because we sometimes take all the spinlocks
+ * and creating that much locking depth can cause
+ * problems.
+ */
+#define NR_STRIPE_HASH_LOCKS 8
+#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
+
 struct r5worker {
 	struct work_struct work;
 	struct r5worker_group *group;
+	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
 	bool working;
 };
 
@@ -382,6 +392,8 @@ struct r5worker_group {
 
 struct r5conf {
 	struct hlist_head	*stripe_hashtbl;
+	/* only protect corresponding hash list and inactive_list */
+	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
 	struct mddev		*mddev;
 	int			chunk_sectors;
 	int			level, algorithm;
@@ -462,7 +474,8 @@ struct r5conf {
 	 * Free stripes pool
 	 */
 	atomic_t		active_stripes;
-	struct list_head	inactive_list;
+	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];
+	atomic_t		empty_inactive_list_nr;
 	struct llist_head	released_stripes;
 	wait_queue_head_t	wait_for_stripe;
 	wait_queue_head_t	wait_for_overlap;
@@ -477,6 +490,7 @@ struct r5conf {
 	 * the new thread here until we fully activate the array.
 	 */
 	struct md_thread	*thread;
+	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
 	struct r5worker_group	*worker_groups;
 	int			group_cnt;
 	int			worker_cnt_per_group;
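Taken together: hash_lock_index records which bucket a stripe_head belongs to, so release paths know which of the hash_locks to take, and temp_inactive_list gives each worker somewhere to park stripes per bucket. The companion raid5.c helpers are outside this diff; a sketch along their likely lines, with names assumed rather than quoted from the merge:

/* Sketch of the locking helpers this header implies; the actual
 * raid5.c implementations are not shown in this diff.  Taking all
 * NR_STRIPE_HASH_LOCKS locks in order is why the comment above keeps
 * the count well below 64: it nests that many spinlocks at once.
 */
static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock_irq(conf->hash_locks + hash);
}

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	spin_lock_irq(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
}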