author:    Kiyoshi Ueda <k-ueda@ct.jp.nec.com>  2009-06-22 05:12:36 -0400
committer: Alasdair G Kergon <agk@redhat.com>   2009-06-22 05:12:36 -0400
commit:    e6ee8c0b767540f59e20da3ced282601db8aa502
tree:      101cb830994734eb45a4a47cd5988f24da67fa4f /drivers/md/dm-table.c
parent:    cec47e3d4a861e1d942b3a580d0bbef2700d2bb2
dm: enable request based option
This patch enables request-based dm.
o Request-based dm and bio-based dm coexist, since there are
  some target drivers that are better suited to bio-based dm.
  There are also other bio-based devices in the kernel
  (e.g. md, loop).
  Since a bio-based device can't receive a struct request,
  there are some limitations on device stacking between
  bio-based and request-based devices:
                          type of underlying device
                           bio-based    request-based
     ------------------------------------------------
      bio-based               OK             OK
      request-based           --             OK
  The device type is recognized by a queue flag in the kernel,
  so dm follows that.  (A sketch of the resulting per-device check
  appears after this list.)
o The type of a dm device is decided when the first table is bound
  to it.  Once decided, the type can't be changed.
o Mempool allocations are deferred to table load time, since the
  mempools for request-based dm differ from those for bio-based dm
  and the required mempool type is fixed by the type of the table.
  (See the load-path sketch after this list.)
o Currently, request-based dm supports only tables that have a single
  target.  To support multiple targets, we need to support request
  splitting or prevent a bio/request from spanning multiple targets.
  The former needs lots of changes in the block layer, and the latter
  needs all target drivers to support a merge() function.
  Both will take time.
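
As a concrete illustration of the queue-flag check mentioned above,
the per-device test added by this patch in dm_table_set_type() boils
down to the sketch below (a fragment under the usual block-layer
headers; "dd" stands for a dm_dev_internal taken from the table's
device list):

	/*
	 * Sketch only: reject an underlying device whose request_queue
	 * is not marked request-stackable (i.e. it can only take bios).
	 */
	struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);

	if (!blk_queue_stackable(q))
		return -EINVAL;	/* can't send struct request to this device */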
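
The deferred mempool allocation then fits into the table-load path
roughly as follows; this is a hedged sketch of the calling order only
(the real caller lives outside dm-table.c and is not part of this
diff):

	/*
	 * Sketch: the table type must be fixed first, because the type
	 * determines which mempools dm_table_alloc_md_mempools() sets up.
	 */
	r = dm_table_set_type(t);
	if (r)
		return r;

	r = dm_table_alloc_md_mempools(t);
	if (r)
		return r;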
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md/dm-table.c')
 drivers/md/dm-table.c | 111
 1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c5f784419f23..aaeb82ed2852 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -41,6 +41,7 @@
 struct dm_table {
 	struct mapped_device *md;
 	atomic_t holders;
+	unsigned type;
 
 	/* btree table */
 	unsigned int depth;
@@ -65,6 +66,8 @@ struct dm_table {
 	/* events get handed up using this callback */
 	void (*event_fn)(void *);
 	void *event_context;
+
+	struct dm_md_mempools *mempools;
 };
 
 /*
@@ -258,6 +261,8 @@ void dm_table_destroy(struct dm_table *t)
 	if (t->devices.next != &t->devices)
 		free_devices(&t->devices);
 
+	dm_free_md_mempools(t->mempools);
+
 	kfree(t);
 }
 
@@ -764,6 +769,99 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	return r;
 }
 
+int dm_table_set_type(struct dm_table *t)
+{
+	unsigned i;
+	unsigned bio_based = 0, request_based = 0;
+	struct dm_target *tgt;
+	struct dm_dev_internal *dd;
+	struct list_head *devices;
+
+	for (i = 0; i < t->num_targets; i++) {
+		tgt = t->targets + i;
+		if (dm_target_request_based(tgt))
+			request_based = 1;
+		else
+			bio_based = 1;
+
+		if (bio_based && request_based) {
+			DMWARN("Inconsistent table: different target types"
+			       " can't be mixed up");
+			return -EINVAL;
+		}
+	}
+
+	if (bio_based) {
+		/* We must use this table as bio-based */
+		t->type = DM_TYPE_BIO_BASED;
+		return 0;
+	}
+
+	BUG_ON(!request_based); /* No targets in this table */
+
+	/* Non-request-stackable devices can't be used for request-based dm */
+	devices = dm_table_get_devices(t);
+	list_for_each_entry(dd, devices, list) {
+		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
+			DMWARN("table load rejected: including"
+			       " non-request-stackable devices");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Request-based dm supports only tables that have a single target now.
+	 * To support multiple targets, request splitting support is needed,
+	 * and that needs lots of changes in the block-layer.
+	 * (e.g. request completion process for partial completion.)
+	 */
+	if (t->num_targets > 1) {
+		DMWARN("Request-based dm doesn't support multiple targets yet");
+		return -EINVAL;
+	}
+
+	t->type = DM_TYPE_REQUEST_BASED;
+
+	return 0;
+}
+
+unsigned dm_table_get_type(struct dm_table *t)
+{
+	return t->type;
+}
+
+bool dm_table_request_based(struct dm_table *t)
+{
+	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
+}
+
+int dm_table_alloc_md_mempools(struct dm_table *t)
+{
+	unsigned type = dm_table_get_type(t);
+
+	if (unlikely(type == DM_TYPE_NONE)) {
+		DMWARN("no table type is set, can't allocate mempools");
+		return -EINVAL;
+	}
+
+	t->mempools = dm_alloc_md_mempools(type);
+	if (!t->mempools)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void dm_table_free_md_mempools(struct dm_table *t)
+{
+	dm_free_md_mempools(t->mempools);
+	t->mempools = NULL;
+}
+
+struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
+{
+	return t->mempools;
+}
+
 static int setup_indexes(struct dm_table *t)
 {
 	int i;
@@ -985,6 +1083,19 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
 
 	dm_table_set_integrity(t);
+
+	/*
+	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
+	 * visible to other CPUs because, once the flag is set, incoming bios
+	 * are processed by request-based dm, which refers to the queue
+	 * settings.
+	 * Until the flag set, bios are passed to bio-based dm and queued to
+	 * md->deferred where queue settings are not needed yet.
+	 * Those bios are passed to request-based dm at the resume time.
+	 */
+	smp_mb();
+	if (dm_table_request_based(t))
+		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
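
For readers of the last hunk: the smp_mb()/flag ordering is the usual
publish-then-set-flag pattern, i.e. all queue settings are made visible
first and only then is QUEUE_FLAG_STACKABLE set, so any code path that
observes the flag also observes the settings.  A purely illustrative
consumer-side sketch (the helper name is hypothetical and not part of
this patch; the real consumer lives in dm core):

	/* Hypothetical helper, for illustration only. */
	static bool queue_ready_for_request_based_dm(struct request_queue *q)
	{
		/*
		 * QUEUE_FLAG_STACKABLE is set only after the queue settings
		 * have been published (see the smp_mb() above), so once the
		 * flag is observed the settings may safely be consulted.
		 */
		return blk_queue_stackable(q);
	}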