author | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2008-12-18 00:49:45 -0500
committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2009-01-02 13:00:53 -0500
commit | 08c95832427b449ecfb357696f7b8e239b79a72c (patch)
tree | f790c731eb6b8f752b7fb2231bce0c23615e208f /drivers/scsi/st.c
parent | b3376b4aaab4c348dfd2e0b7595dc12f64c9fac9 (diff)
[SCSI] st: kill struct st_buf_fragment
This removes struct st_buf_fragment and uses the reserved_pages array to
store the fragment pages instead.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Kai Makisara <Kai.Makisara@kolumbus.fi>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/st.c')
-rw-r--r-- | drivers/scsi/st.c | 78
1 file changed, 43 insertions(+), 35 deletions(-)
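The shape of the change, roughly: instead of keeping a per-segment { page, length } pair (the struct st_buf_fragment being removed), the buffer keeps only the reserved_pages array, and every segment shares the single allocation order stored in map_data.page_order, so a segment's length is always PAGE_SIZE << page_order. A simplified user-space sketch of the before/after layout (illustrative only; the struct and field names below are stand-ins, not the real st.h definitions):

```c
#include <stddef.h>

#define PAGE_SIZE 4096UL	/* stand-in; the kernel provides this */
#define ST_MAX_SG 16		/* arbitrary example segment count */

/* Before: each fragment carried its own page pointer and length. */
struct old_fragment {			/* stand-in for struct st_buf_fragment */
	void *page;
	unsigned int length;
};

/* After: only the page pointers remain; all segments share one order,
 * so the per-segment length field is redundant. */
struct new_buffer {			/* stand-in for the relevant st_buffer fields */
	int page_order;			/* corresponds to map_data.page_order */
	void *reserved_pages[ST_MAX_SG];
};

/* Every segment now has the same length, derived when needed. */
static inline size_t segment_length(const struct new_buffer *b)
{
	return PAGE_SIZE << b->page_order;
}
```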
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ce1fd3ab243..1cfd217f890 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3753,8 +3753,9 @@ static struct st_buffer *
 	else
 		priority = GFP_KERNEL;
 
-	i = sizeof(struct st_buffer) + (max_sg - 1) * sizeof(struct scatterlist) +
-		max_sg * sizeof(struct st_buf_fragment);
+	i = sizeof(struct st_buffer) +
+		(max_sg - 1) * sizeof(struct scatterlist);
+
 	tb = kzalloc(i, priority);
 	if (!tb) {
 		printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
@@ -3762,7 +3763,6 @@ static struct st_buffer *
 	}
 	tb->frp_segs = tb->orig_frp_segs = 0;
 	tb->use_sg = max_sg;
-	tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);
 
 	tb->dma = need_dma;
 	tb->buffer_size = got;
@@ -3799,9 +3799,12 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
 	if (need_dma)
 		priority |= GFP_DMA;
 
+	if (STbuffer->cleared)
+		priority |= __GFP_ZERO;
+
 	if (STbuffer->frp_segs) {
-		b_size = STbuffer->frp[0].length;
-		order = get_order(b_size);
+		order = STbuffer->map_data.page_order;
+		b_size = PAGE_SIZE << order;
 	} else {
 		for (b_size = PAGE_SIZE, order = 0;
 		     order <= 6 && b_size < new_size; order++, b_size *= 2)
@@ -3810,22 +3813,22 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
 
 	for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
 	     segs < max_segs && got < new_size;) {
-		STbuffer->frp[segs].page = alloc_pages(priority, order);
-		if (STbuffer->frp[segs].page == NULL) {
+		struct page *page;
+
+		page = alloc_pages(priority, order);
+		if (!page) {
 			DEB(STbuffer->buffer_size = got);
 			normalize_buffer(STbuffer);
 			return 0;
 		}
-		STbuffer->frp[segs].length = b_size;
+
 		STbuffer->frp_segs += 1;
 		got += b_size;
 		STbuffer->buffer_size = got;
-		if (STbuffer->cleared)
-			memset(page_address(STbuffer->frp[segs].page), 0, b_size);
-		STbuffer->reserved_pages[segs] = STbuffer->frp[segs].page;
+		STbuffer->reserved_pages[segs] = page;
 		segs++;
 	}
-	STbuffer->b_data = page_address(STbuffer->frp[0].page);
+	STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
 	STbuffer->map_data.page_order = order;
 
 	return 1;
@@ -3838,7 +3841,8 @@ static void clear_buffer(struct st_buffer * st_bp)
 	int i;
 
 	for (i=0; i < st_bp->frp_segs; i++)
-		memset(page_address(st_bp->frp[i].page), 0, st_bp->frp[i].length);
+		memset(page_address(st_bp->reserved_pages[i]), 0,
+		       PAGE_SIZE << st_bp->map_data.page_order);
 	st_bp->cleared = 1;
 }
 
@@ -3846,12 +3850,11 @@ static void clear_buffer(struct st_buffer * st_bp)
 /* Release the extra buffer */
 static void normalize_buffer(struct st_buffer * STbuffer)
 {
-	int i, order;
+	int i, order = STbuffer->map_data.page_order;
 
 	for (i = STbuffer->orig_frp_segs; i < STbuffer->frp_segs; i++) {
-		order = get_order(STbuffer->frp[i].length);
-		__free_pages(STbuffer->frp[i].page, order);
-		STbuffer->buffer_size -= STbuffer->frp[i].length;
+		__free_pages(STbuffer->reserved_pages[i], order);
+		STbuffer->buffer_size -= (PAGE_SIZE << order);
 	}
 	STbuffer->frp_segs = STbuffer->orig_frp_segs;
 	STbuffer->frp_sg_current = 0;
@@ -3866,18 +3869,19 @@ static void normalize_buffer(struct st_buffer * STbuffer)
 static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
 {
 	int i, cnt, res, offset;
+	int length = PAGE_SIZE << st_bp->map_data.page_order;
 
 	for (i = 0, offset = st_bp->buffer_bytes;
-	     i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++)
-		offset -= st_bp->frp[i].length;
+	     i < st_bp->frp_segs && offset >= length; i++)
+		offset -= length;
 	if (i == st_bp->frp_segs) {	/* Should never happen */
 		printk(KERN_WARNING "st: append_to_buffer offset overflow.\n");
 		return (-EIO);
 	}
 	for (; i < st_bp->frp_segs && do_count > 0; i++) {
-		cnt = st_bp->frp[i].length - offset < do_count ?
-		    st_bp->frp[i].length - offset : do_count;
-		res = copy_from_user(page_address(st_bp->frp[i].page) + offset, ubp, cnt);
+		struct page *page = st_bp->reserved_pages[i];
+		cnt = length - offset < do_count ? length - offset : do_count;
+		res = copy_from_user(page_address(page) + offset, ubp, cnt);
 		if (res)
 			return (-EFAULT);
 		do_count -= cnt;
@@ -3897,18 +3901,19 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
 static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
 {
 	int i, cnt, res, offset;
+	int length = PAGE_SIZE << st_bp->map_data.page_order;
 
 	for (i = 0, offset = st_bp->read_pointer;
-	     i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++)
-		offset -= st_bp->frp[i].length;
+	     i < st_bp->frp_segs && offset >= length; i++)
+		offset -= length;
 	if (i == st_bp->frp_segs) {	/* Should never happen */
 		printk(KERN_WARNING "st: from_buffer offset overflow.\n");
 		return (-EIO);
 	}
 	for (; i < st_bp->frp_segs && do_count > 0; i++) {
-		cnt = st_bp->frp[i].length - offset < do_count ?
-		    st_bp->frp[i].length - offset : do_count;
-		res = copy_to_user(ubp, page_address(st_bp->frp[i].page) + offset, cnt);
+		struct page *page = st_bp->reserved_pages[i];
+		cnt = length - offset < do_count ? length - offset : do_count;
+		res = copy_to_user(ubp, page_address(page) + offset, cnt);
 		if (res)
 			return (-EFAULT);
 		do_count -= cnt;
@@ -3929,6 +3934,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
 {
 	int src_seg, dst_seg, src_offset = 0, dst_offset;
 	int count, total;
+	int length = PAGE_SIZE << st_bp->map_data.page_order;
 
 	if (offset == 0)
 		return;
@@ -3936,24 +3942,26 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
 	total=st_bp->buffer_bytes - offset;
 	for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) {
 		src_offset = offset;
-		if (src_offset < st_bp->frp[src_seg].length)
+		if (src_offset < length)
 			break;
-		offset -= st_bp->frp[src_seg].length;
+		offset -= length;
 	}
 
 	st_bp->buffer_bytes = st_bp->read_pointer = total;
 	for (dst_seg=dst_offset=0; total > 0; ) {
-		count = min(st_bp->frp[dst_seg].length - dst_offset,
-			    st_bp->frp[src_seg].length - src_offset);
-		memmove(page_address(st_bp->frp[dst_seg].page) + dst_offset,
-			page_address(st_bp->frp[src_seg].page) + src_offset, count);
+		struct page *dpage = st_bp->reserved_pages[dst_seg];
+		struct page *spage = st_bp->reserved_pages[src_seg];
+
+		count = min(length - dst_offset, length - src_offset);
+		memmove(page_address(dpage) + dst_offset,
+			page_address(spage) + src_offset, count);
 		src_offset += count;
-		if (src_offset >= st_bp->frp[src_seg].length) {
+		if (src_offset >= length) {
 			src_seg++;
 			src_offset = 0;
 		}
 		dst_offset += count;
-		if (dst_offset >= st_bp->frp[dst_seg].length) {
+		if (dst_offset >= length) {
 			dst_seg++;
 			dst_offset = 0;
 		}
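Because every segment now has the same length, the offset walks in append_to_buffer(), from_buffer() and move_buffer_data() only need that single length value instead of per-fragment sizes. A minimal user-space sketch of what the walk computes (example values are made up, not taken from the driver):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* stand-in; the kernel provides this */

int main(void)
{
	unsigned long page_order = 2;				/* example order */
	unsigned long length = PAGE_SIZE << page_order;		/* 16 KiB per segment */
	unsigned long buffer_bytes = 40000;			/* example byte offset */

	/* The loop form used in the driver's helpers... */
	unsigned long seg = 0, off = buffer_bytes;
	while (off >= length) {
		off -= length;
		seg++;
	}

	/* ...lands on the same (segment, offset) pair as plain arithmetic
	 * once every segment is exactly 'length' bytes long. */
	printf("segment %lu, offset %lu (direct: %lu, %lu)\n",
	       seg, off, buffer_bytes / length, buffer_bytes % length);
	return 0;
}
```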