diff options
Diffstat (limited to 'fs/bio.c')
| -rw-r--r-- | fs/bio.c | 23 |
1 file changed, 21 insertions, 2 deletions
| @@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs) | |||
| 370 | { | 370 | { |
| 371 | struct bio *bio; | 371 | struct bio *bio; |
| 372 | 372 | ||
| 373 | if (nr_iovecs > UIO_MAXIOV) | ||
| 374 | return NULL; | ||
| 375 | |||
| 373 | bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), | 376 | bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), |
| 374 | gfp_mask); | 377 | gfp_mask); |
| 375 | if (unlikely(!bio)) | 378 | if (unlikely(!bio)) |
| @@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd) | |||
| 697 | static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, | 700 | static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, |
| 698 | gfp_t gfp_mask) | 701 | gfp_t gfp_mask) |
| 699 | { | 702 | { |
| 700 | struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask); | 703 | struct bio_map_data *bmd; |
| 701 | 704 | ||
| 705 | if (iov_count > UIO_MAXIOV) | ||
| 706 | return NULL; | ||
| 707 | |||
| 708 | bmd = kmalloc(sizeof(*bmd), gfp_mask); | ||
| 702 | if (!bmd) | 709 | if (!bmd) |
| 703 | return NULL; | 710 | return NULL; |
| 704 | 711 | ||
| @@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
| 827 | end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 834 | end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 828 | start = uaddr >> PAGE_SHIFT; | 835 | start = uaddr >> PAGE_SHIFT; |
| 829 | 836 | ||
| 837 | /* | ||
| 838 | * Overflow, abort | ||
| 839 | */ | ||
| 840 | if (end < start) | ||
| 841 | return ERR_PTR(-EINVAL); | ||
| 842 | |||
| 830 | nr_pages += end - start; | 843 | nr_pages += end - start; |
| 831 | len += iov[i].iov_len; | 844 | len += iov[i].iov_len; |
| 832 | } | 845 | } |
| @@ -955,6 +968,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, | |||
| 955 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 968 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 956 | unsigned long start = uaddr >> PAGE_SHIFT; | 969 | unsigned long start = uaddr >> PAGE_SHIFT; |
| 957 | 970 | ||
| 971 | /* | ||
| 972 | * Overflow, abort | ||
| 973 | */ | ||
| 974 | if (end < start) | ||
| 975 | return ERR_PTR(-EINVAL); | ||
| 976 | |||
| 958 | nr_pages += end - start; | 977 | nr_pages += end - start; |
| 959 | /* | 978 | /* |
| 960 | * buffer must be aligned to at least hardsector size for now | 979 | * buffer must be aligned to at least hardsector size for now |
| @@ -982,7 +1001,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, | |||
| 982 | unsigned long start = uaddr >> PAGE_SHIFT; | 1001 | unsigned long start = uaddr >> PAGE_SHIFT; |
| 983 | const int local_nr_pages = end - start; | 1002 | const int local_nr_pages = end - start; |
| 984 | const int page_limit = cur_page + local_nr_pages; | 1003 | const int page_limit = cur_page + local_nr_pages; |
| 985 | 1004 | ||
| 986 | ret = get_user_pages_fast(uaddr, local_nr_pages, | 1005 | ret = get_user_pages_fast(uaddr, local_nr_pages, |
| 987 | write_to_vm, &pages[cur_page]); | 1006 | write_to_vm, &pages[cur_page]); |
| 988 | if (ret < local_nr_pages) { | 1007 | if (ret < local_nr_pages) { |
