author		Mike Snitzer <snitzer@redhat.com>	2018-05-22 18:26:20 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2018-06-08 11:53:14 -0400
commit		72d711c8768805b5f8cf2d23c575dfd188993e12 (patch)
tree		1a442b3c9c5d7eb4fb61c2bdd10c19386ae19d28
parent		b2b04e7e2d3bffd301d1769700ba013f58ca01b7 (diff)
dm: adjust structure members to improve alignment
Eliminate most holes in DM data structures that were modified by
commit 6f1c819c21 ("dm: convert to bioset_init()/mempool_init()").
Also prevent structure members from unnecessarily spanning cache lines.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
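For illustration only (this sketch is not part of the patch): on an LP64 build, placing a 4-byte member between pointer-sized members forces the compiler to insert padding holes, and grouping members of similar size removes them, which is the kind of repacking this commit performs by hand. The struct and member names below are hypothetical and merely echo fields of struct mapped_device; in practice such holes are usually located with a tool like pahole.

/* layout.c — build with: cc -std=c11 layout.c && ./a.out */
#include <stdio.h>

/* "Before": a 4-byte member between pointer-sized members leaves a
 * 4-byte hole after each int on an LP64 target. */
struct before {
	void *map;		/* 8 bytes */
	int numa_node_id;	/* 4 bytes + 4-byte hole */
	void *queue;		/* 8 bytes */
	int type;		/* 4 bytes + 4-byte tail pad */
};

/* "After": same members grouped by size; the internal hole is gone. */
struct after {
	void *map;
	void *queue;
	int numa_node_id;
	int type;
};

int main(void)
{
	printf("before: %zu bytes\n", sizeof(struct before));	/* 32 on LP64 */
	printf("after:  %zu bytes\n", sizeof(struct after));	/* 24 on LP64 */
	return 0;
}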
-rw-r--r--	drivers/md/dm-bio-prison-v1.c	 2
-rw-r--r--	drivers/md/dm-bio-prison-v2.c	 2
-rw-r--r--	drivers/md/dm-cache-target.c	61
-rw-r--r--	drivers/md/dm-core.h		38
-rw-r--r--	drivers/md/dm-crypt.c		26
-rw-r--r--	drivers/md/dm-kcopyd.c		 3
-rw-r--r--	drivers/md/dm-region-hash.c	13
-rw-r--r--	drivers/md/dm-thin.c		 5
-rw-r--r--	drivers/md/dm-zoned-target.c	 2
9 files changed, 79 insertions, 73 deletions
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index e794e3662fdd..b5389890bbc3 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -19,8 +19,8 @@
 
 struct dm_bio_prison {
 	spinlock_t lock;
-	mempool_t cell_pool;
 	struct rb_root cells;
+	mempool_t cell_pool;
 };
 
 static struct kmem_cache *_cell_cache;
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index f866bc97b032..b092cdc8e1ae 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -21,8 +21,8 @@ struct dm_bio_prison_v2 {
 	struct workqueue_struct *wq;
 
 	spinlock_t lock;
-	mempool_t cell_pool;
 	struct rb_root cells;
+	mempool_t cell_pool;
 };
 
 static struct kmem_cache *_cell_cache;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 001c71248246..ce14a3d1f609 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -371,7 +371,13 @@ struct cache_stats {
 
 struct cache {
 	struct dm_target *ti;
-	struct dm_target_callbacks callbacks;
+	spinlock_t lock;
+
+	/*
+	 * Fields for converting from sectors to blocks.
+	 */
+	int sectors_per_block_shift;
+	sector_t sectors_per_block;
 
 	struct dm_cache_metadata *cmd;
 
@@ -402,13 +408,11 @@ struct cache {
 	dm_cblock_t cache_size;
 
 	/*
-	 * Fields for converting from sectors to blocks.
+	 * Invalidation fields.
 	 */
-	sector_t sectors_per_block;
-	int sectors_per_block_shift;
+	spinlock_t invalidation_lock;
+	struct list_head invalidation_requests;
 
-	spinlock_t lock;
-	struct bio_list deferred_bios;
 	sector_t migration_threshold;
 	wait_queue_head_t migration_wait;
 	atomic_t nr_allocated_migrations;
@@ -419,13 +423,11 @@ struct cache {
 	 */
 	atomic_t nr_io_migrations;
 
+	struct bio_list deferred_bios;
+
 	struct rw_semaphore quiesce_lock;
 
-	/*
-	 * cache_size entries, dirty if set
-	 */
-	atomic_t nr_dirty;
-	unsigned long *dirty_bitset;
+	struct dm_target_callbacks callbacks;
 
 	/*
 	 * origin_blocks entries, discarded if set.
@@ -442,17 +444,27 @@ struct cache {
 	const char **ctr_args;
 
 	struct dm_kcopyd_client *copier;
-	struct workqueue_struct *wq;
 	struct work_struct deferred_bio_worker;
 	struct work_struct migration_worker;
+	struct workqueue_struct *wq;
 	struct delayed_work waker;
 	struct dm_bio_prison_v2 *prison;
-	struct bio_set bs;
 
-	mempool_t migration_pool;
+	/*
+	 * cache_size entries, dirty if set
+	 */
+	unsigned long *dirty_bitset;
+	atomic_t nr_dirty;
 
-	struct dm_cache_policy *policy;
 	unsigned policy_nr_args;
+	struct dm_cache_policy *policy;
+
+	/*
+	 * Cache features such as write-through.
+	 */
+	struct cache_features features;
+
+	struct cache_stats stats;
 
 	bool need_tick_bio:1;
 	bool sized:1;
@@ -461,25 +473,16 @@ struct cache {
 	bool loaded_mappings:1;
 	bool loaded_discards:1;
 
-	/*
-	 * Cache features such as write-through.
-	 */
-	struct cache_features features;
-
-	struct cache_stats stats;
+	struct rw_semaphore background_work_lock;
 
-	/*
-	 * Invalidation fields.
-	 */
-	spinlock_t invalidation_lock;
-	struct list_head invalidation_requests;
+	struct batcher committer;
+	struct work_struct commit_ws;
 
 	struct io_tracker tracker;
 
-	struct work_struct commit_ws;
-	struct batcher committer;
+	mempool_t migration_pool;
 
-	struct rw_semaphore background_work_lock;
+	struct bio_set bs;
 };
 
 struct per_bio_data {
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index f21c5d21bf1b..7d480c930eaf 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -31,6 +31,9 @@ struct dm_kobject_holder {
 struct mapped_device {
 	struct mutex suspend_lock;
 
+	struct mutex table_devices_lock;
+	struct list_head table_devices;
+
 	/*
 	 * The current mapping (struct dm_table *).
 	 * Use dm_get_live_table{_fast} or take suspend_lock for
@@ -38,17 +41,14 @@ struct mapped_device {
 	 */
 	void __rcu *map;
 
-	struct list_head table_devices;
-	struct mutex table_devices_lock;
-
 	unsigned long flags;
 
-	struct request_queue *queue;
-	int numa_node_id;
-
-	enum dm_queue_mode type;
 	/* Protect queue and type against concurrent access. */
 	struct mutex type_lock;
+	enum dm_queue_mode type;
+
+	int numa_node_id;
+	struct request_queue *queue;
 
 	atomic_t holders;
 	atomic_t open_count;
@@ -56,21 +56,21 @@ struct mapped_device {
 	struct dm_target *immutable_target;
 	struct target_type *immutable_target_type;
 
+	char name[16];
 	struct gendisk *disk;
 	struct dax_device *dax_dev;
-	char name[16];
-
-	void *interface_ptr;
 
 	/*
 	 * A list of ios that arrived while we were suspended.
 	 */
-	atomic_t pending[2];
-	wait_queue_head_t wait;
 	struct work_struct work;
+	wait_queue_head_t wait;
+	atomic_t pending[2];
 	spinlock_t deferred_lock;
 	struct bio_list deferred;
 
+	void *interface_ptr;
+
 	/*
 	 * Event handling.
 	 */
@@ -84,17 +84,17 @@ struct mapped_device {
 	unsigned internal_suspend_count;
 
 	/*
-	 * Processing queue (flush)
-	 */
-	struct workqueue_struct *wq;
-
-	/*
 	 * io objects are allocated from here.
 	 */
 	struct bio_set io_bs;
 	struct bio_set bs;
 
 	/*
+	 * Processing queue (flush)
+	 */
+	struct workqueue_struct *wq;
+
+	/*
 	 * freeze/thaw support require holding onto a super block
 	 */
 	struct super_block *frozen_sb;
@@ -102,11 +102,11 @@ struct mapped_device {
 	/* forced geometry settings */
 	struct hd_geometry geometry;
 
-	struct block_device *bdev;
-
 	/* kobject and completion */
 	struct dm_kobject_holder kobj_holder;
 
+	struct block_device *bdev;
+
 	/* zero-length flush that will be cloned and submitted to targets */
 	struct bio flush_bio;
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index da02f4d8e4b9..4939fbc34ff2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -139,25 +139,13 @@ struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
 
-	/*
-	 * pool for per bio private data, crypto requests,
-	 * encryption requeusts/buffer pages and integrity tags
-	 */
-	mempool_t req_pool;
-	mempool_t page_pool;
-	mempool_t tag_pool;
-	unsigned tag_pool_max_sectors;
-
 	struct percpu_counter n_allocated_pages;
 
-	struct bio_set bs;
-	struct mutex bio_alloc_lock;
-
 	struct workqueue_struct *io_queue;
 	struct workqueue_struct *crypt_queue;
 
-	struct task_struct *write_thread;
 	wait_queue_head_t write_thread_wait;
+	struct task_struct *write_thread;
 	struct rb_root write_tree;
 
 	char *cipher;
@@ -213,6 +201,18 @@ struct crypt_config {
 	unsigned int integrity_iv_size;
 	unsigned int on_disk_tag_size;
 
+	/*
+	 * pool for per bio private data, crypto requests,
+	 * encryption requeusts/buffer pages and integrity tags
+	 */
+	unsigned tag_pool_max_sectors;
+	mempool_t tag_pool;
+	mempool_t req_pool;
+	mempool_t page_pool;
+
+	struct bio_set bs;
+	struct mutex bio_alloc_lock;
+
 	u8 *authenc_key; /* space for keys in authenc() format (if used) */
 	u8 key[0];
 };
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index ce7efc7434be..3c7547a3c371 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -45,7 +45,6 @@ struct dm_kcopyd_client {
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
-	atomic_t nr_jobs;
 
 	mempool_t job_pool;
 
@@ -54,6 +53,8 @@ struct dm_kcopyd_client {
 
 	struct dm_kcopyd_throttle *throttle;
 
+	atomic_t nr_jobs;
+
 /*
  * We maintain three lists of jobs:
  *
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index abf3521b80a8..c832ec398f02 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -63,27 +63,28 @@ struct dm_region_hash {
 
 	/* hash table */
 	rwlock_t hash_lock;
-	mempool_t region_pool;
 	unsigned mask;
 	unsigned nr_buckets;
 	unsigned prime;
 	unsigned shift;
 	struct list_head *buckets;
 
+	/*
+	 * If there was a flush failure no regions can be marked clean.
+	 */
+	int flush_failure;
+
 	unsigned max_recovery; /* Max # of regions to recover in parallel */
 
 	spinlock_t region_lock;
 	atomic_t recovery_in_flight;
-	struct semaphore recovery_count;
 	struct list_head clean_regions;
 	struct list_head quiesced_regions;
 	struct list_head recovered_regions;
 	struct list_head failed_recovered_regions;
+	struct semaphore recovery_count;
 
-	/*
-	 * If there was a flush failure no regions can be marked clean.
-	 */
-	int flush_failure;
+	mempool_t region_pool;
 
 	void *context;
 	sector_t target_begin;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5772756c63c1..6cf9c9364103 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -240,9 +240,9 @@ struct pool {
 	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
 
+	struct work_struct worker;
 	struct workqueue_struct *wq;
 	struct throttle throttle;
-	struct work_struct worker;
 	struct delayed_work waker;
 	struct delayed_work no_space_timeout;
 
@@ -260,7 +260,6 @@ struct pool {
 	struct dm_deferred_set *all_io_ds;
 
 	struct dm_thin_new_mapping *next_mapping;
-	mempool_t mapping_pool;
 
 	process_bio_fn process_bio;
 	process_bio_fn process_discard;
@@ -273,6 +272,8 @@ struct pool {
 	process_mapping_fn process_prepared_discard_pt2;
 
 	struct dm_bio_prison_cell **cell_sort_array;
+
+	mempool_t mapping_pool;
 };
 
 static enum pool_mode get_pool_mode(struct pool *pool);
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 30602d15ad9a..3c0e45f4dcf5 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -52,9 +52,9 @@ struct dmz_target {
 	struct dmz_reclaim *reclaim;
 
 	/* For chunk work */
-	struct mutex chunk_lock;
 	struct radix_tree_root chunk_rxtree;
 	struct workqueue_struct *chunk_wq;
+	struct mutex chunk_lock;
 
 	/* For cloned BIOs to zones */
 	struct bio_set bio_set;