author		Mike Snitzer <snitzer@redhat.com>	2009-06-22 05:12:32 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-06-22 05:12:32 -0400
commit		5ab97588fb266187b88d1ad893251c94388f18ba
tree		0dfd693fb3a31e22353e5640db8c17c989b5c65d /drivers
parent		be6d4305db093ad1cc623f7dd3d2470b7bd73fa4
dm table: replace struct io_restrictions with struct queue_limits
Use blk_stack_limits() to stack block limits (including topology) rather
than duplicate the equivalent within Device Mapper.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
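
For context, the pattern this patch adopts throughout is the block layer's generic stacking helper: blk_stack_limits(top, bottom, offset) folds a lower device's struct queue_limits into an upper device's, field by field, and returns a negative value when the combination would be misaligned. A minimal sketch of that calling pattern (the wrapper function and its warning are illustrative, not part of the patch; blk_stack_limits(), bdev_get_queue() and q->limits are the real interfaces):

#include <linux/blkdev.h>

/*
 * Illustrative wrapper: fold one underlying device's queue limits
 * into a stacked device's limits, warning on misalignment. The
 * offset argument is the bottom device's start, in sectors, within
 * the top device (0 here, as in this patch).
 */
static void stack_bottom_device(struct queue_limits *top,
				struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (blk_stack_limits(top, &q->limits, 0) < 0)
		printk(KERN_WARNING "stacked device is misaligned\n");
}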
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-table.c	138
1 file changed, 43 insertions(+), 95 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e3bcfb8b15a1..41ec2bf9fbe9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -66,7 +66,7 @@ struct dm_table {
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
-	struct io_restrictions limits;
+	struct queue_limits limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
@@ -89,43 +89,6 @@ static unsigned int int_log(unsigned int n, unsigned int base)
 }

 /*
- * Returns the minimum that is _not_ zero, unless both are zero.
- */
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
-/*
- * Combine two io_restrictions, always taking the lower value.
- */
-static void combine_restrictions_low(struct io_restrictions *lhs,
-				     struct io_restrictions *rhs)
-{
-	lhs->max_sectors =
-		min_not_zero(lhs->max_sectors, rhs->max_sectors);
-
-	lhs->max_phys_segments =
-		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);
-
-	lhs->max_hw_segments =
-		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
-
-	lhs->logical_block_size = max(lhs->logical_block_size,
-				      rhs->logical_block_size);
-
-	lhs->max_segment_size =
-		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
-
-	lhs->max_hw_sectors =
-		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
-
-	lhs->seg_boundary_mask =
-		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
-
-	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
-
-	lhs->no_cluster |= rhs->no_cluster;
-}
-
-/*
  * Calculate the index of the child node of the n'th node k'th key.
  */
 static inline unsigned int get_child(unsigned int n, unsigned int k)
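
The deleted helper encoded one rule per field: take the smaller of the two values while treating zero as "no limit set". Worked out on small illustrative values:

/*
 * min_not_zero(l, r) -- "the minimum that is _not_ zero, unless
 * both are zero":
 *
 *	min_not_zero(0, 8) == 8		(0 means unset: take the other)
 *	min_not_zero(4, 0) == 4
 *	min_not_zero(4, 8) == 4		(both set: take the smaller)
 *	min_not_zero(0, 0) == 0		(both unset: stays unset)
 *
 * logical_block_size was the one exception, combined with max():
 * a stacked device cannot address blocks smaller than the largest
 * block size of its components. blk_stack_limits() applies the
 * same min/max conventions to the corresponding queue_limits
 * fields, which is what makes this open-coded version redundant.
 */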
@@ -511,10 +474,14 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 	return 0;
 }

+/*
+ * Returns the minimum that is _not_ zero, unless both are zero.
+ */
+#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
+
 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	struct io_restrictions *rs = &ti->limits;
 	char b[BDEVNAME_SIZE];

 	if (unlikely(!q)) {
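
Note that min_not_zero() is moved here, not changed: its body still lacks outer parentheses, so it is only safe when it forms a complete expression, which is how the one remaining caller below uses it. With illustrative variables x, a, b:

x = 2 * min_not_zero(a, b);
/* expands to */
x = 2 * (a == 0) ? b : ((b == 0) ? a : min(a, b));
/* where "2 * (a == 0)" binds before ?:, not the whole ternary. */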
@@ -523,15 +490,9 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 		return;
 	}

-	/*
-	 * Combine the device limits low.
-	 *
-	 * FIXME: if we move an io_restriction struct
-	 * into q this would just be a call to
-	 * combine_restrictions_low()
-	 */
-	rs->max_sectors =
-		min_not_zero(rs->max_sectors, queue_max_sectors(q));
+	if (blk_stack_limits(&ti->limits, &q->limits, 0) < 0)
+		DMWARN("%s: target device %s is misaligned",
+		       dm_device_name(ti->table->md), bdevname(bdev, b));

 	/*
 	 * Check if merge fn is supported.
@@ -540,33 +501,9 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 */

 	if (q->merge_bvec_fn && !ti->type->merge)
-		rs->max_sectors =
-			min_not_zero(rs->max_sectors,
+		ti->limits.max_sectors =
+			min_not_zero(ti->limits.max_sectors,
 				     (unsigned int) (PAGE_SIZE >> 9));
-
-	rs->max_phys_segments =
-		min_not_zero(rs->max_phys_segments,
-			     queue_max_phys_segments(q));
-
-	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
-
-	rs->logical_block_size = max(rs->logical_block_size,
-				     queue_logical_block_size(q));
-
-	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
-
-	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
-
-	rs->seg_boundary_mask =
-		min_not_zero(rs->seg_boundary_mask,
-			     queue_segment_boundary(q));
-
-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
-
-	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);

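
The merge_bvec_fn clamp is the one restriction still applied by hand, because struct queue_limits carries no equivalent field: if the underlying queue has a merge_bvec_fn that DM cannot consult, bios are capped at a single page. The arithmetic, assuming 4 KiB pages:

/*
 * PAGE_SIZE >> 9 converts bytes to 512-byte sectors:
 *	4096 >> 9 == 8 sectors == 4 KiB == one page,
 * so each bio carries at most one page of data, which any queue
 * must accept without consulting its merge_bvec_fn.
 */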
@@ -704,24 +641,32 @@ int dm_split_args(int *argc, char ***argvp, char *input)
 	return 0;
 }

-static void check_for_valid_limits(struct io_restrictions *rs)
+static void init_valid_queue_limits(struct queue_limits *limits)
 {
-	if (!rs->max_sectors)
-		rs->max_sectors = SAFE_MAX_SECTORS;
-	if (!rs->max_hw_sectors)
-		rs->max_hw_sectors = SAFE_MAX_SECTORS;
-	if (!rs->max_phys_segments)
-		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
-	if (!rs->max_hw_segments)
-		rs->max_hw_segments = MAX_HW_SEGMENTS;
-	if (!rs->logical_block_size)
-		rs->logical_block_size = 1 << SECTOR_SHIFT;
-	if (!rs->max_segment_size)
-		rs->max_segment_size = MAX_SEGMENT_SIZE;
-	if (!rs->seg_boundary_mask)
-		rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
-	if (!rs->bounce_pfn)
-		rs->bounce_pfn = -1;
+	if (!limits->max_sectors)
+		limits->max_sectors = SAFE_MAX_SECTORS;
+	if (!limits->max_hw_sectors)
+		limits->max_hw_sectors = SAFE_MAX_SECTORS;
+	if (!limits->max_phys_segments)
+		limits->max_phys_segments = MAX_PHYS_SEGMENTS;
+	if (!limits->max_hw_segments)
+		limits->max_hw_segments = MAX_HW_SEGMENTS;
+	if (!limits->logical_block_size)
+		limits->logical_block_size = 1 << SECTOR_SHIFT;
+	if (!limits->physical_block_size)
+		limits->physical_block_size = 1 << SECTOR_SHIFT;
+	if (!limits->io_min)
+		limits->io_min = 1 << SECTOR_SHIFT;
+	if (!limits->max_segment_size)
+		limits->max_segment_size = MAX_SEGMENT_SIZE;
+	if (!limits->seg_boundary_mask)
+		limits->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+	if (!limits->bounce_pfn)
+		limits->bounce_pfn = -1;
+	/*
+	 * The other fields (alignment_offset, io_opt, misaligned)
+	 * hold 0 from the kzalloc().
+	 */
 }

 /*
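
The renamed init_valid_queue_limits() keeps the old fallbacks and adds 512-byte defaults for the new topology fields that must not stay zero; the remaining fields are legitimately zero straight from the table's kzalloc(), as the new comment notes. The added defaults work out as:

/*
 * SECTOR_SHIFT is 9, so 1 << SECTOR_SHIFT == 512 bytes:
 * logical_block_size, physical_block_size and io_min all fall
 * back to the traditional 512-byte sector when no target set them.
 */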
@@ -841,9 +786,12 @@ int dm_table_add_target(struct dm_table *t, const char *type,

 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

-	/* FIXME: the plan is to combine high here and then have
-	 * the merge fn apply the target level restrictions. */
-	combine_restrictions_low(&t->limits, &tgt->limits);
+	if (blk_stack_limits(&t->limits, &tgt->limits, 0) < 0)
+		DMWARN("%s: target device (start sect %llu len %llu) "
+		       "is misaligned",
+		       dm_device_name(t->md),
+		       (unsigned long long) tgt->begin,
+		       (unsigned long long) tgt->len);
 	return 0;

 bad:
@@ -886,7 +834,7 @@ int dm_table_complete(struct dm_table *t)
 	int r = 0;
 	unsigned int leaf_nodes;

-	check_for_valid_limits(&t->limits);
+	init_valid_queue_limits(&t->limits);

 	r = validate_hardware_logical_block_alignment(t);
 	if (r)
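
Taken together with the previous hunk, the defaults are filled in before the alignment check runs, so validation never sees a zero logical_block_size. A condensed sketch of the resulting ordering in dm_table_complete() (only the two calls touched by this patch; the rest of the function body is elided):

int dm_table_complete(struct dm_table *t)	/* abridged sketch */
{
	int r;

	/* Replace any still-zero limits with safe defaults first... */
	init_valid_queue_limits(&t->limits);

	/* ...so the alignment check sees fully populated limits. */
	r = validate_hardware_logical_block_alignment(t);
	if (r)
		return r;

	/* ...b-tree construction etc. follows in the real code... */
	return 0;
}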