-rw-r--r--  drivers/md/Kconfig             |  2
-rw-r--r--  drivers/md/dm-crypt.c          | 31
-rw-r--r--  drivers/md/dm-ioctl.c          | 12
-rw-r--r--  drivers/md/dm-table.c          | 16
-rw-r--r--  drivers/md/dm.c                | 31
-rw-r--r--  drivers/md/dm.h                |  7
-rw-r--r--  fs/binfmt_aout.c               |  1
-rw-r--r--  include/linux/device-mapper.h  |  1
8 files changed, 68 insertions, 33 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 9b6fbf044fd8..3fa7c77d9bd9 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,7 +269,7 @@ config DM_MULTIPATH_RDAC
 
 config DM_MULTIPATH_HP
 	tristate "HP MSA multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
 	---help---
 	  Multipath support for HP MSA (Active/Passive) series hardware.
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 28c6ae095c56..6b66ee46b87d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -398,7 +398,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned int i;
+	unsigned i, len;
+	struct page *page;
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
@@ -407,10 +408,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	clone_init(io, clone);
 
 	for (i = 0; i < nr_iovecs; i++) {
-		struct bio_vec *bv = bio_iovec_idx(clone, i);
-
-		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!bv->bv_page)
+		page = mempool_alloc(cc->page_pool, gfp_mask);
+		if (!page)
 			break;
 
 		/*
@@ -421,15 +420,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
-		bv->bv_offset = 0;
-		if (size > PAGE_SIZE)
-			bv->bv_len = PAGE_SIZE;
-		else
-			bv->bv_len = size;
+		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+
+		if (!bio_add_page(clone, page, len, 0)) {
+			mempool_free(page, cc->page_pool);
+			break;
+		}
 
-		clone->bi_size += bv->bv_len;
-		clone->bi_vcnt++;
-		size -= bv->bv_len;
+		size -= len;
 	}
 
 	if (!clone->bi_size) {
@@ -511,6 +509,9 @@ static void crypt_endio(struct bio *clone, int error)
 	struct crypt_config *cc = io->target->private;
 	unsigned read_io = bio_data_dir(clone) == READ;
 
+	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
+		error = -EIO;
+
 	/*
 	 * free the processed pages
 	 */
@@ -519,10 +520,8 @@ static void crypt_endio(struct bio *clone, int error)
 		goto out;
 	}
 
-	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
-		error = -EIO;
+	if (unlikely(error))
 		goto out;
-	}
 
 	bio_put(clone);
 	kcryptd_queue_crypt(io);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 138200bf5e0b..9627fa0f9470 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -332,6 +332,8 @@ static int dm_hash_rename(const char *old, const char *new)
 		dm_table_put(table);
 	}
 
+	dm_kobject_uevent(hc->md);
+
 	dm_put(hc->md);
 	up_write(&_hash_lock);
 	kfree(old_name);
@@ -1250,21 +1252,17 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	if (!table)
 		goto out_argv;
 
-	if (tmsg->sector >= dm_table_get_size(table)) {
+	ti = dm_table_find_target(table, tmsg->sector);
+	if (!dm_target_is_valid(ti)) {
 		DMWARN("Target message sector outside device.");
 		r = -EINVAL;
-		goto out_table;
-	}
-
-	ti = dm_table_find_target(table, tmsg->sector);
-	if (ti->type->message)
+	} else if (ti->type->message)
 		r = ti->type->message(ti, argc, argv);
 	else {
 		DMWARN("Target type does not support messages");
 		r = -EINVAL;
 	}
 
- out_table:
 	dm_table_put(table);
  out_argv:
 	kfree(argv);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e298d8d11f24..47818d8249cb 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -99,6 +99,9 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
 
+	lhs->max_hw_sectors =
+		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
+
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
@@ -189,8 +192,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 
 	/*
 	 * Allocate both the target array and offset array at once.
+	 * Append an empty entry to catch sectors beyond the end of
+	 * the device.
 	 */
-	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
+	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
 					  sizeof(sector_t));
 	if (!n_highs)
 		return -ENOMEM;
@@ -564,6 +569,9 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	rs->max_segment_size =
 		min_not_zero(rs->max_segment_size, q->max_segment_size);
 
+	rs->max_hw_sectors =
+		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
 			     q->seg_boundary_mask);
@@ -701,6 +709,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 {
 	if (!rs->max_sectors)
 		rs->max_sectors = SAFE_MAX_SECTORS;
+	if (!rs->max_hw_sectors)
+		rs->max_hw_sectors = SAFE_MAX_SECTORS;
 	if (!rs->max_phys_segments)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
@@ -867,6 +877,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 
 /*
  * Search the btree for the correct target.
+ *
+ * Caller should check returned pointer with dm_target_is_valid()
+ * to trap I/O beyond end of device.
  */
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 {
@@ -896,6 +909,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->max_hw_segments = t->limits.max_hw_segments;
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
+	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
 	q->bounce_pfn = t->limits.bounce_pfn;
 	if (t->limits.no_cluster)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 07cbbb8eb3e0..88c0fd657825 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -672,13 +672,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	return clone;
 }
 
-static void __clone_and_map(struct clone_info *ci)
+static int __clone_and_map(struct clone_info *ci)
 {
 	struct bio *clone, *bio = ci->bio;
-	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
-	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
+	struct dm_target *ti;
+	sector_t len = 0, max;
 	struct dm_target_io *tio;
 
+	ti = dm_table_find_target(ci->map, ci->sector);
+	if (!dm_target_is_valid(ti))
+		return -EIO;
+
+	max = max_io_len(ci->md, ci->sector, ti);
+
 	/*
 	 * Allocate a target io object.
 	 */
@@ -736,6 +742,9 @@ static void __clone_and_map(struct clone_info *ci)
 		do {
 			if (offset) {
 				ti = dm_table_find_target(ci->map, ci->sector);
+				if (!dm_target_is_valid(ti))
+					return -EIO;
+
 				max = max_io_len(ci->md, ci->sector, ti);
 
 				tio = alloc_tio(ci->md);
@@ -759,6 +768,8 @@ static void __clone_and_map(struct clone_info *ci)
 
 		ci->idx++;
 	}
+
+	return 0;
 }
 
 /*
@@ -767,6 +778,7 @@ static void __clone_and_map(struct clone_info *ci)
 static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
 	struct clone_info ci;
+	int error = 0;
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
@@ -784,11 +796,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.idx = bio->bi_idx;
 
 	start_io_acct(ci.io);
-	while (ci.sector_count)
-		__clone_and_map(&ci);
+	while (ci.sector_count && !error)
+		error = __clone_and_map(&ci);
 
 	/* drop the extra reference count */
-	dec_pending(ci.io, 0);
+	dec_pending(ci.io, error);
 	dm_table_put(ci.map);
 
 	return 0;
@@ -1502,7 +1514,7 @@ int dm_resume(struct mapped_device *md)
 
 	dm_table_unplug_all(map);
 
-	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);
+	dm_kobject_uevent(md);
 
 	r = 0;
 
@@ -1516,6 +1528,11 @@ out:
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
+void dm_kobject_uevent(struct mapped_device *md)
+{
+	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);
+}
+
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
 {
 	return atomic_add_return(1, &md->uevent_seq);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4b3faa45277e..b4584a39383b 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -112,6 +112,11 @@ int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
 
+/*
+ * To check the return value from dm_table_find_target().
+ */
+#define dm_target_is_valid(t) ((t)->table)
+
 /*-----------------------------------------------------------------
  * A registry of target types.
  *---------------------------------------------------------------*/
@@ -182,4 +187,6 @@ union map_info *dm_get_mapinfo(struct bio *bio);
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
 
+void dm_kobject_uevent(struct mapped_device *md);
+
 #endif
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index e176d195e7e5..7596e1e94cde 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -319,7 +319,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	current->mm->free_area_cache = current->mm->mmap_base;
 	current->mm->cached_hole_size = 0;
 
-	current->mm->mmap = NULL;
 	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
 #ifdef __sparc__
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index b8b7c51389fe..e765e191663d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -115,6 +115,7 @@ struct io_restrictions {
 	unsigned short max_hw_segments;
 	unsigned short hardsect_size;
 	unsigned int max_segment_size;
+	unsigned int max_hw_sectors;
 	unsigned long seg_boundary_mask;
 	unsigned long bounce_pfn;
 	unsigned char no_cluster; /* inverted so that 0 is default */