author     Mike Christie <michaelc@cs.wisc.edu>      2005-11-08 05:06:41 -0500
committer  James Bottomley <jejb@mulgrave.(none)>    2005-12-14 22:15:45 -0500
commit     d6b10348f9397943eb968419a2b7f08895e38472
tree       9721f49b0cc7a6c4b41d7ef77b11bdfe0d74faea /drivers/scsi/sg.c
parent     aa7b5cd750c766f66a92c9f78ba176bc77512b7e
[SCSI] convert sg to scsi_execute_async
Convert sg to always send scatterlists, and kill scsi_request usage.
TODO:
- move the DIO code to a common place, or make the block layer usable
  for ULDs.
- move the buffer allocation code to a common place for all ULDs to use,
  and make it obey all queue limits so that problems are caught before
  calling scsi_execute_async. Currently, sg.c could allocate a buffer
  that is too large and send the request to scsi_execute_async;
  scsi_execute_async will then check it against all the queue limits
  and return a failure in this case. It would be nicer to know about
  the queue limit violation right away.
- move the indirect (copy_to/from_user) paths to a common place, or make
  the block layer usable for ULDs.
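
For reference, a minimal sketch (illustrative only, not part of this patch)
of the shape of the new submission path. The wrapper name sg_submit_sketch
is invented for illustration; every type, field and function it uses is
taken from this patch:

static int sg_submit_sketch(Sg_fd *sfp, Sg_request *srp,
                            unsigned char *cmnd, int timeout)
{
        sg_io_hdr_t *hp = &srp->header;
        int data_dir;

        /* map the user-visible transfer direction to a DMA direction */
        switch (hp->dxfer_direction) {
        case SG_DXFER_TO_FROM_DEV:
        case SG_DXFER_FROM_DEV:
                data_dir = DMA_FROM_DEVICE;
                break;
        case SG_DXFER_TO_DEV:
                data_dir = DMA_TO_DEVICE;
                break;
        case SG_DXFER_UNKNOWN:
                data_dir = DMA_BIDIRECTIONAL;
                break;
        default:
                data_dir = DMA_NONE;
                break;
        }

        /*
         * srp->data.buffer is now always a struct scatterlist array with
         * srp->data.k_use_sg entries; a non-zero return (most likely out
         * of memory or a bad mapping) means the command never started and
         * the caller must tear the request down.
         */
        return scsi_execute_async(sfp->parentdp->device, cmnd, data_dir,
                                  srp->data.buffer, hp->dxfer_len,
                                  srp->data.k_use_sg, timeout,
                                  SG_DEFAULT_RETRIES, srp, sg_cmd_done,
                                  GFP_ATOMIC);
}

Completion then comes back through the new callback
sg_cmd_done(void *data, char *sense, int result, int resid) rather than a
Scsi_Cmnd/Scsi_Request pair, so sg no longer owns a scsi_request at all.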
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/sg.c')
-rw-r--r--  drivers/scsi/sg.c  |  686
1 file changed, 259 insertions, 427 deletions
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index b55c2a8a547c..221e96e2620a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -104,8 +104,6 @@ static int sg_allow_dio = SG_ALLOW_DIO_DEF; | |||
104 | static int sg_add(struct class_device *, struct class_interface *); | 104 | static int sg_add(struct class_device *, struct class_interface *); |
105 | static void sg_remove(struct class_device *, struct class_interface *); | 105 | static void sg_remove(struct class_device *, struct class_interface *); |
106 | 106 | ||
107 | static Scsi_Request *dummy_cmdp; /* only used for sizeof */ | ||
108 | |||
109 | static DEFINE_RWLOCK(sg_dev_arr_lock); /* Also used to lock | 107 | static DEFINE_RWLOCK(sg_dev_arr_lock); /* Also used to lock |
110 | file descriptor list for device */ | 108 | file descriptor list for device */ |
111 | 109 | ||
@@ -119,7 +117,7 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ | |||
119 | unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */ | 117 | unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */ |
120 | unsigned bufflen; /* Size of (aggregate) data buffer */ | 118 | unsigned bufflen; /* Size of (aggregate) data buffer */ |
121 | unsigned b_malloc_len; /* actual len malloc'ed in buffer */ | 119 | unsigned b_malloc_len; /* actual len malloc'ed in buffer */ |
122 | void *buffer; /* Data buffer or scatter list (k_use_sg>0) */ | 120 | struct scatterlist *buffer;/* scatter list */ |
123 | char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ | 121 | char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ |
124 | unsigned char cmd_opcode; /* first byte of command */ | 122 | unsigned char cmd_opcode; /* first byte of command */ |
125 | } Sg_scatter_hold; | 123 | } Sg_scatter_hold; |
@@ -128,12 +126,11 @@ struct sg_device; /* forward declarations */ | |||
128 | struct sg_fd; | 126 | struct sg_fd; |
129 | 127 | ||
130 | typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ | 128 | typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ |
131 | Scsi_Request *my_cmdp; /* != 0 when request with lower levels */ | ||
132 | struct sg_request *nextrp; /* NULL -> tail request (slist) */ | 129 | struct sg_request *nextrp; /* NULL -> tail request (slist) */ |
133 | struct sg_fd *parentfp; /* NULL -> not in use */ | 130 | struct sg_fd *parentfp; /* NULL -> not in use */ |
134 | Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ | 131 | Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ |
135 | sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ | 132 | sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ |
136 | unsigned char sense_b[sizeof (dummy_cmdp->sr_sense_buffer)]; | 133 | unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; |
137 | char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ | 134 | char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ |
138 | char orphan; /* 1 -> drop on sight, 0 -> normal */ | 135 | char orphan; /* 1 -> drop on sight, 0 -> normal */ |
139 | char sg_io_owned; /* 1 -> packet belongs to SG_IO */ | 136 | char sg_io_owned; /* 1 -> packet belongs to SG_IO */ |
@@ -174,7 +171,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */ | |||
174 | } Sg_device; | 171 | } Sg_device; |
175 | 172 | ||
176 | static int sg_fasync(int fd, struct file *filp, int mode); | 173 | static int sg_fasync(int fd, struct file *filp, int mode); |
177 | static void sg_cmd_done(Scsi_Cmnd * SCpnt); /* tasklet or soft irq callback */ | 174 | /* tasklet or soft irq callback */ |
175 | static void sg_cmd_done(void *data, char *sense, int result, int resid); | ||
178 | static int sg_start_req(Sg_request * srp); | 176 | static int sg_start_req(Sg_request * srp); |
179 | static void sg_finish_rem_req(Sg_request * srp); | 177 | static void sg_finish_rem_req(Sg_request * srp); |
180 | static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); | 178 | static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); |
@@ -195,8 +193,8 @@ static void sg_remove_scat(Sg_scatter_hold * schp); | |||
195 | static void sg_build_reserve(Sg_fd * sfp, int req_size); | 193 | static void sg_build_reserve(Sg_fd * sfp, int req_size); |
196 | static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); | 194 | static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); |
197 | static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); | 195 | static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); |
198 | static char *sg_page_malloc(int rqSz, int lowDma, int *retSzp); | 196 | static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp); |
199 | static void sg_page_free(char *buff, int size); | 197 | static void sg_page_free(struct page *page, int size); |
200 | static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); | 198 | static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); |
201 | static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); | 199 | static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); |
202 | static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); | 200 | static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); |
@@ -207,7 +205,6 @@ static int sg_res_in_use(Sg_fd * sfp); | |||
207 | static int sg_allow_access(unsigned char opcode, char dev_type); | 205 | static int sg_allow_access(unsigned char opcode, char dev_type); |
208 | static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len); | 206 | static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len); |
209 | static Sg_device *sg_get_dev(int dev); | 207 | static Sg_device *sg_get_dev(int dev); |
210 | static inline unsigned char *sg_scatg2virt(const struct scatterlist *sclp); | ||
211 | #ifdef CONFIG_SCSI_PROC_FS | 208 | #ifdef CONFIG_SCSI_PROC_FS |
212 | static int sg_last_dev(void); | 209 | static int sg_last_dev(void); |
213 | #endif | 210 | #endif |
@@ -226,6 +223,7 @@ sg_open(struct inode *inode, struct file *filp) | |||
226 | { | 223 | { |
227 | int dev = iminor(inode); | 224 | int dev = iminor(inode); |
228 | int flags = filp->f_flags; | 225 | int flags = filp->f_flags; |
226 | struct request_queue *q; | ||
229 | Sg_device *sdp; | 227 | Sg_device *sdp; |
230 | Sg_fd *sfp; | 228 | Sg_fd *sfp; |
231 | int res; | 229 | int res; |
@@ -287,7 +285,9 @@ sg_open(struct inode *inode, struct file *filp) | |||
287 | } | 285 | } |
288 | if (!sdp->headfp) { /* no existing opens on this device */ | 286 | if (!sdp->headfp) { /* no existing opens on this device */ |
289 | sdp->sgdebug = 0; | 287 | sdp->sgdebug = 0; |
290 | sdp->sg_tablesize = sdp->device->host->sg_tablesize; | 288 | q = sdp->device->request_queue; |
289 | sdp->sg_tablesize = min(q->max_hw_segments, | ||
290 | q->max_phys_segments); | ||
291 | } | 291 | } |
292 | if ((sfp = sg_add_sfp(sdp, dev))) | 292 | if ((sfp = sg_add_sfp(sdp, dev))) |
293 | filp->private_data = sfp; | 293 | filp->private_data = sfp; |
@@ -340,6 +340,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) | |||
340 | return -ENXIO; | 340 | return -ENXIO; |
341 | SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n", | 341 | SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n", |
342 | sdp->disk->disk_name, (int) count)); | 342 | sdp->disk->disk_name, (int) count)); |
343 | |||
343 | if (!access_ok(VERIFY_WRITE, buf, count)) | 344 | if (!access_ok(VERIFY_WRITE, buf, count)) |
344 | return -EFAULT; | 345 | return -EFAULT; |
345 | if (sfp->force_packid && (count >= SZ_SG_HEADER)) { | 346 | if (sfp->force_packid && (count >= SZ_SG_HEADER)) { |
@@ -491,7 +492,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) | |||
491 | if ((hp->mx_sb_len > 0) && hp->sbp) { | 492 | if ((hp->mx_sb_len > 0) && hp->sbp) { |
492 | if ((CHECK_CONDITION & hp->masked_status) || | 493 | if ((CHECK_CONDITION & hp->masked_status) || |
493 | (DRIVER_SENSE & hp->driver_status)) { | 494 | (DRIVER_SENSE & hp->driver_status)) { |
494 | int sb_len = sizeof (dummy_cmdp->sr_sense_buffer); | 495 | int sb_len = SCSI_SENSE_BUFFERSIZE; |
495 | sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; | 496 | sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; |
496 | len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ | 497 | len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ |
497 | len = (len > sb_len) ? sb_len : len; | 498 | len = (len > sb_len) ? sb_len : len; |
@@ -525,7 +526,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
525 | Sg_request *srp; | 526 | Sg_request *srp; |
526 | struct sg_header old_hdr; | 527 | struct sg_header old_hdr; |
527 | sg_io_hdr_t *hp; | 528 | sg_io_hdr_t *hp; |
528 | unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)]; | 529 | unsigned char cmnd[MAX_COMMAND_SIZE]; |
529 | 530 | ||
530 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) | 531 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
531 | return -ENXIO; | 532 | return -ENXIO; |
@@ -624,7 +625,7 @@ sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, | |||
624 | int k; | 625 | int k; |
625 | Sg_request *srp; | 626 | Sg_request *srp; |
626 | sg_io_hdr_t *hp; | 627 | sg_io_hdr_t *hp; |
627 | unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)]; | 628 | unsigned char cmnd[MAX_COMMAND_SIZE]; |
628 | int timeout; | 629 | int timeout; |
629 | unsigned long ul_timeout; | 630 | unsigned long ul_timeout; |
630 | 631 | ||
@@ -692,11 +693,9 @@ static int | |||
692 | sg_common_write(Sg_fd * sfp, Sg_request * srp, | 693 | sg_common_write(Sg_fd * sfp, Sg_request * srp, |
693 | unsigned char *cmnd, int timeout, int blocking) | 694 | unsigned char *cmnd, int timeout, int blocking) |
694 | { | 695 | { |
695 | int k; | 696 | int k, data_dir; |
696 | Scsi_Request *SRpnt; | ||
697 | Sg_device *sdp = sfp->parentdp; | 697 | Sg_device *sdp = sfp->parentdp; |
698 | sg_io_hdr_t *hp = &srp->header; | 698 | sg_io_hdr_t *hp = &srp->header; |
699 | request_queue_t *q; | ||
700 | 699 | ||
701 | srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ | 700 | srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ |
702 | hp->status = 0; | 701 | hp->status = 0; |
@@ -723,51 +722,36 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
723 | sg_finish_rem_req(srp); | 722 | sg_finish_rem_req(srp); |
724 | return -ENODEV; | 723 | return -ENODEV; |
725 | } | 724 | } |
726 | SRpnt = scsi_allocate_request(sdp->device, GFP_ATOMIC); | ||
727 | if (SRpnt == NULL) { | ||
728 | SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n")); | ||
729 | sg_finish_rem_req(srp); | ||
730 | return -ENOMEM; | ||
731 | } | ||
732 | 725 | ||
733 | srp->my_cmdp = SRpnt; | ||
734 | q = SRpnt->sr_device->request_queue; | ||
735 | SRpnt->sr_request->rq_disk = sdp->disk; | ||
736 | SRpnt->sr_sense_buffer[0] = 0; | ||
737 | SRpnt->sr_cmd_len = hp->cmd_len; | ||
738 | SRpnt->sr_use_sg = srp->data.k_use_sg; | ||
739 | SRpnt->sr_sglist_len = srp->data.sglist_len; | ||
740 | SRpnt->sr_bufflen = srp->data.bufflen; | ||
741 | SRpnt->sr_underflow = 0; | ||
742 | SRpnt->sr_buffer = srp->data.buffer; | ||
743 | switch (hp->dxfer_direction) { | 726 | switch (hp->dxfer_direction) { |
744 | case SG_DXFER_TO_FROM_DEV: | 727 | case SG_DXFER_TO_FROM_DEV: |
745 | case SG_DXFER_FROM_DEV: | 728 | case SG_DXFER_FROM_DEV: |
746 | SRpnt->sr_data_direction = DMA_FROM_DEVICE; | 729 | data_dir = DMA_FROM_DEVICE; |
747 | break; | 730 | break; |
748 | case SG_DXFER_TO_DEV: | 731 | case SG_DXFER_TO_DEV: |
749 | SRpnt->sr_data_direction = DMA_TO_DEVICE; | 732 | data_dir = DMA_TO_DEVICE; |
750 | break; | 733 | break; |
751 | case SG_DXFER_UNKNOWN: | 734 | case SG_DXFER_UNKNOWN: |
752 | SRpnt->sr_data_direction = DMA_BIDIRECTIONAL; | 735 | data_dir = DMA_BIDIRECTIONAL; |
753 | break; | 736 | break; |
754 | default: | 737 | default: |
755 | SRpnt->sr_data_direction = DMA_NONE; | 738 | data_dir = DMA_NONE; |
756 | break; | 739 | break; |
757 | } | 740 | } |
758 | SRpnt->upper_private_data = srp; | ||
759 | srp->data.k_use_sg = 0; | ||
760 | srp->data.sglist_len = 0; | ||
761 | srp->data.bufflen = 0; | ||
762 | srp->data.buffer = NULL; | ||
763 | hp->duration = jiffies_to_msecs(jiffies); | 741 | hp->duration = jiffies_to_msecs(jiffies); |
764 | /* Now send everything of to mid-level. The next time we hear about this | 742 | /* Now send everything of to mid-level. The next time we hear about this |
765 | packet is when sg_cmd_done() is called (i.e. a callback). */ | 743 | packet is when sg_cmd_done() is called (i.e. a callback). */ |
766 | scsi_do_req(SRpnt, (void *) cmnd, | 744 | if (scsi_execute_async(sdp->device, cmnd, data_dir, srp->data.buffer, |
767 | (void *) SRpnt->sr_buffer, hp->dxfer_len, | 745 | hp->dxfer_len, srp->data.k_use_sg, timeout, |
768 | sg_cmd_done, timeout, SG_DEFAULT_RETRIES); | 746 | SG_DEFAULT_RETRIES, srp, sg_cmd_done, |
769 | /* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */ | 747 | GFP_ATOMIC)) { |
770 | return 0; | 748 | SCSI_LOG_TIMEOUT(1, printk("sg_write: scsi_execute_async failed\n")); |
749 | /* | ||
750 | * most likely out of mem, but could also be a bad map | ||
751 | */ | ||
752 | return -ENOMEM; | ||
753 | } else | ||
754 | return 0; | ||
771 | } | 755 | } |
772 | 756 | ||
773 | static int | 757 | static int |
@@ -1156,45 +1140,22 @@ sg_fasync(int fd, struct file *filp, int mode) | |||
1156 | return (retval < 0) ? retval : 0; | 1140 | return (retval < 0) ? retval : 0; |
1157 | } | 1141 | } |
1158 | 1142 | ||
1159 | static inline unsigned char * | ||
1160 | sg_scatg2virt(const struct scatterlist *sclp) | ||
1161 | { | ||
1162 | return (sclp && sclp->page) ? | ||
1163 | (unsigned char *) page_address(sclp->page) + sclp->offset : NULL; | ||
1164 | } | ||
1165 | |||
1166 | /* When startFinish==1 increments page counts for pages other than the | 1143 | /* When startFinish==1 increments page counts for pages other than the |
1167 | first of scatter gather elements obtained from __get_free_pages(). | 1144 | first of scatter gather elements obtained from alloc_pages(). |
1168 | When startFinish==0 decrements ... */ | 1145 | When startFinish==0 decrements ... */ |
1169 | static void | 1146 | static void |
1170 | sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish) | 1147 | sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish) |
1171 | { | 1148 | { |
1172 | void *page_ptr; | 1149 | struct scatterlist *sg = rsv_schp->buffer; |
1173 | struct page *page; | 1150 | struct page *page; |
1174 | int k, m; | 1151 | int k, m; |
1175 | 1152 | ||
1176 | SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n", | 1153 | SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n", |
1177 | startFinish, rsv_schp->k_use_sg)); | 1154 | startFinish, rsv_schp->k_use_sg)); |
1178 | /* N.B. correction _not_ applied to base page of each allocation */ | 1155 | /* N.B. correction _not_ applied to base page of each allocation */ |
1179 | if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */ | 1156 | for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) { |
1180 | struct scatterlist *sclp = rsv_schp->buffer; | 1157 | for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) { |
1181 | 1158 | page = sg->page; | |
1182 | for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) { | ||
1183 | for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE) { | ||
1184 | page_ptr = sg_scatg2virt(sclp) + m; | ||
1185 | page = virt_to_page(page_ptr); | ||
1186 | if (startFinish) | ||
1187 | get_page(page); | ||
1188 | else { | ||
1189 | if (page_count(page) > 0) | ||
1190 | __put_page(page); | ||
1191 | } | ||
1192 | } | ||
1193 | } | ||
1194 | } else { /* reserve buffer is just a single allocation */ | ||
1195 | for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE) { | ||
1196 | page_ptr = (unsigned char *) rsv_schp->buffer + m; | ||
1197 | page = virt_to_page(page_ptr); | ||
1198 | if (startFinish) | 1159 | if (startFinish) |
1199 | get_page(page); | 1160 | get_page(page); |
1200 | else { | 1161 | else { |
@@ -1210,9 +1171,10 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) | |||
1210 | { | 1171 | { |
1211 | Sg_fd *sfp; | 1172 | Sg_fd *sfp; |
1212 | struct page *page = NOPAGE_SIGBUS; | 1173 | struct page *page = NOPAGE_SIGBUS; |
1213 | void *page_ptr = NULL; | 1174 | unsigned long offset, len, sa; |
1214 | unsigned long offset; | ||
1215 | Sg_scatter_hold *rsv_schp; | 1175 | Sg_scatter_hold *rsv_schp; |
1176 | struct scatterlist *sg; | ||
1177 | int k; | ||
1216 | 1178 | ||
1217 | if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) | 1179 | if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) |
1218 | return page; | 1180 | return page; |
@@ -1222,30 +1184,21 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type) | |||
1222 | return page; | 1184 | return page; |
1223 | SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n", | 1185 | SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n", |
1224 | offset, rsv_schp->k_use_sg)); | 1186 | offset, rsv_schp->k_use_sg)); |
1225 | if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */ | 1187 | sg = rsv_schp->buffer; |
1226 | int k; | 1188 | sa = vma->vm_start; |
1227 | unsigned long sa = vma->vm_start; | 1189 | for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); |
1228 | unsigned long len; | 1190 | ++k, ++sg) { |
1229 | struct scatterlist *sclp = rsv_schp->buffer; | 1191 | len = vma->vm_end - sa; |
1230 | 1192 | len = (len < sg->length) ? len : sg->length; | |
1231 | for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); | 1193 | if (offset < len) { |
1232 | ++k, ++sclp) { | 1194 | page = sg->page; |
1233 | len = vma->vm_end - sa; | 1195 | get_page(page); /* increment page count */ |
1234 | len = (len < sclp->length) ? len : sclp->length; | 1196 | break; |
1235 | if (offset < len) { | ||
1236 | page_ptr = sg_scatg2virt(sclp) + offset; | ||
1237 | page = virt_to_page(page_ptr); | ||
1238 | get_page(page); /* increment page count */ | ||
1239 | break; | ||
1240 | } | ||
1241 | sa += len; | ||
1242 | offset -= len; | ||
1243 | } | 1197 | } |
1244 | } else { /* reserve buffer is just a single allocation */ | 1198 | sa += len; |
1245 | page_ptr = (unsigned char *) rsv_schp->buffer + offset; | 1199 | offset -= len; |
1246 | page = virt_to_page(page_ptr); | ||
1247 | get_page(page); /* increment page count */ | ||
1248 | } | 1200 | } |
1201 | |||
1249 | if (type) | 1202 | if (type) |
1250 | *type = VM_FAULT_MINOR; | 1203 | *type = VM_FAULT_MINOR; |
1251 | return page; | 1204 | return page; |
@@ -1259,8 +1212,10 @@ static int | |||
1259 | sg_mmap(struct file *filp, struct vm_area_struct *vma) | 1212 | sg_mmap(struct file *filp, struct vm_area_struct *vma) |
1260 | { | 1213 | { |
1261 | Sg_fd *sfp; | 1214 | Sg_fd *sfp; |
1262 | unsigned long req_sz; | 1215 | unsigned long req_sz, len, sa; |
1263 | Sg_scatter_hold *rsv_schp; | 1216 | Sg_scatter_hold *rsv_schp; |
1217 | int k; | ||
1218 | struct scatterlist *sg; | ||
1264 | 1219 | ||
1265 | if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) | 1220 | if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) |
1266 | return -ENXIO; | 1221 | return -ENXIO; |
@@ -1273,24 +1228,15 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) | |||
1273 | if (req_sz > rsv_schp->bufflen) | 1228 | if (req_sz > rsv_schp->bufflen) |
1274 | return -ENOMEM; /* cannot map more than reserved buffer */ | 1229 | return -ENOMEM; /* cannot map more than reserved buffer */ |
1275 | 1230 | ||
1276 | if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */ | 1231 | sa = vma->vm_start; |
1277 | int k; | 1232 | sg = rsv_schp->buffer; |
1278 | unsigned long sa = vma->vm_start; | 1233 | for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); |
1279 | unsigned long len; | 1234 | ++k, ++sg) { |
1280 | struct scatterlist *sclp = rsv_schp->buffer; | 1235 | len = vma->vm_end - sa; |
1281 | 1236 | len = (len < sg->length) ? len : sg->length; | |
1282 | for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); | 1237 | sa += len; |
1283 | ++k, ++sclp) { | ||
1284 | if (0 != sclp->offset) | ||
1285 | return -EFAULT; /* non page aligned memory ?? */ | ||
1286 | len = vma->vm_end - sa; | ||
1287 | len = (len < sclp->length) ? len : sclp->length; | ||
1288 | sa += len; | ||
1289 | } | ||
1290 | } else { /* reserve buffer is just a single allocation */ | ||
1291 | if ((unsigned long) rsv_schp->buffer & (PAGE_SIZE - 1)) | ||
1292 | return -EFAULT; /* non page aligned memory ?? */ | ||
1293 | } | 1238 | } |
1239 | |||
1294 | if (0 == sfp->mmap_called) { | 1240 | if (0 == sfp->mmap_called) { |
1295 | sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */ | 1241 | sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */ |
1296 | sfp->mmap_called = 1; | 1242 | sfp->mmap_called = 1; |
@@ -1304,21 +1250,16 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) | |||
1304 | /* This function is a "bottom half" handler that is called by the | 1250 | /* This function is a "bottom half" handler that is called by the |
1305 | * mid level when a command is completed (or has failed). */ | 1251 | * mid level when a command is completed (or has failed). */ |
1306 | static void | 1252 | static void |
1307 | sg_cmd_done(Scsi_Cmnd * SCpnt) | 1253 | sg_cmd_done(void *data, char *sense, int result, int resid) |
1308 | { | 1254 | { |
1309 | Scsi_Request *SRpnt = NULL; | 1255 | Sg_request *srp = data; |
1310 | Sg_device *sdp = NULL; | 1256 | Sg_device *sdp = NULL; |
1311 | Sg_fd *sfp; | 1257 | Sg_fd *sfp; |
1312 | Sg_request *srp = NULL; | ||
1313 | unsigned long iflags; | 1258 | unsigned long iflags; |
1314 | unsigned int ms; | 1259 | unsigned int ms; |
1315 | 1260 | ||
1316 | if (SCpnt && (SRpnt = SCpnt->sc_request)) | ||
1317 | srp = (Sg_request *) SRpnt->upper_private_data; | ||
1318 | if (NULL == srp) { | 1261 | if (NULL == srp) { |
1319 | printk(KERN_ERR "sg_cmd_done: NULL request\n"); | 1262 | printk(KERN_ERR "sg_cmd_done: NULL request\n"); |
1320 | if (SRpnt) | ||
1321 | scsi_release_request(SRpnt); | ||
1322 | return; | 1263 | return; |
1323 | } | 1264 | } |
1324 | sfp = srp->parentfp; | 1265 | sfp = srp->parentfp; |
@@ -1326,49 +1267,34 @@ sg_cmd_done(Scsi_Cmnd * SCpnt) | |||
1326 | sdp = sfp->parentdp; | 1267 | sdp = sfp->parentdp; |
1327 | if ((NULL == sdp) || sdp->detached) { | 1268 | if ((NULL == sdp) || sdp->detached) { |
1328 | printk(KERN_INFO "sg_cmd_done: device detached\n"); | 1269 | printk(KERN_INFO "sg_cmd_done: device detached\n"); |
1329 | scsi_release_request(SRpnt); | ||
1330 | return; | 1270 | return; |
1331 | } | 1271 | } |
1332 | 1272 | ||
1333 | /* First transfer ownership of data buffers to sg_device object. */ | ||
1334 | srp->data.k_use_sg = SRpnt->sr_use_sg; | ||
1335 | srp->data.sglist_len = SRpnt->sr_sglist_len; | ||
1336 | srp->data.bufflen = SRpnt->sr_bufflen; | ||
1337 | srp->data.buffer = SRpnt->sr_buffer; | ||
1338 | /* now clear out request structure */ | ||
1339 | SRpnt->sr_use_sg = 0; | ||
1340 | SRpnt->sr_sglist_len = 0; | ||
1341 | SRpnt->sr_bufflen = 0; | ||
1342 | SRpnt->sr_buffer = NULL; | ||
1343 | SRpnt->sr_underflow = 0; | ||
1344 | SRpnt->sr_request->rq_disk = NULL; /* "sg" _disowns_ request blk */ | ||
1345 | |||
1346 | srp->my_cmdp = NULL; | ||
1347 | 1273 | ||
1348 | SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", | 1274 | SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", |
1349 | sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result)); | 1275 | sdp->disk->disk_name, srp->header.pack_id, result)); |
1350 | srp->header.resid = SCpnt->resid; | 1276 | srp->header.resid = resid; |
1351 | ms = jiffies_to_msecs(jiffies); | 1277 | ms = jiffies_to_msecs(jiffies); |
1352 | srp->header.duration = (ms > srp->header.duration) ? | 1278 | srp->header.duration = (ms > srp->header.duration) ? |
1353 | (ms - srp->header.duration) : 0; | 1279 | (ms - srp->header.duration) : 0; |
1354 | if (0 != SRpnt->sr_result) { | 1280 | if (0 != result) { |
1355 | struct scsi_sense_hdr sshdr; | 1281 | struct scsi_sense_hdr sshdr; |
1356 | 1282 | ||
1357 | memcpy(srp->sense_b, SRpnt->sr_sense_buffer, | 1283 | memcpy(srp->sense_b, sense, sizeof (srp->sense_b)); |
1358 | sizeof (srp->sense_b)); | 1284 | srp->header.status = 0xff & result; |
1359 | srp->header.status = 0xff & SRpnt->sr_result; | 1285 | srp->header.masked_status = status_byte(result); |
1360 | srp->header.masked_status = status_byte(SRpnt->sr_result); | 1286 | srp->header.msg_status = msg_byte(result); |
1361 | srp->header.msg_status = msg_byte(SRpnt->sr_result); | 1287 | srp->header.host_status = host_byte(result); |
1362 | srp->header.host_status = host_byte(SRpnt->sr_result); | 1288 | srp->header.driver_status = driver_byte(result); |
1363 | srp->header.driver_status = driver_byte(SRpnt->sr_result); | ||
1364 | if ((sdp->sgdebug > 0) && | 1289 | if ((sdp->sgdebug > 0) && |
1365 | ((CHECK_CONDITION == srp->header.masked_status) || | 1290 | ((CHECK_CONDITION == srp->header.masked_status) || |
1366 | (COMMAND_TERMINATED == srp->header.masked_status))) | 1291 | (COMMAND_TERMINATED == srp->header.masked_status))) |
1367 | scsi_print_req_sense("sg_cmd_done", SRpnt); | 1292 | __scsi_print_sense("sg_cmd_done", sense, |
1293 | SCSI_SENSE_BUFFERSIZE); | ||
1368 | 1294 | ||
1369 | /* Following if statement is a patch supplied by Eric Youngdale */ | 1295 | /* Following if statement is a patch supplied by Eric Youngdale */ |
1370 | if (driver_byte(SRpnt->sr_result) != 0 | 1296 | if (driver_byte(result) != 0 |
1371 | && scsi_command_normalize_sense(SCpnt, &sshdr) | 1297 | && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) |
1372 | && !scsi_sense_is_deferred(&sshdr) | 1298 | && !scsi_sense_is_deferred(&sshdr) |
1373 | && sshdr.sense_key == UNIT_ATTENTION | 1299 | && sshdr.sense_key == UNIT_ATTENTION |
1374 | && sdp->device->removable) { | 1300 | && sdp->device->removable) { |
@@ -1379,8 +1305,6 @@ sg_cmd_done(Scsi_Cmnd * SCpnt) | |||
1379 | } | 1305 | } |
1380 | /* Rely on write phase to clean out srp status values, so no "else" */ | 1306 | /* Rely on write phase to clean out srp status values, so no "else" */ |
1381 | 1307 | ||
1382 | scsi_release_request(SRpnt); | ||
1383 | SRpnt = NULL; | ||
1384 | if (sfp->closed) { /* whoops this fd already released, cleanup */ | 1308 | if (sfp->closed) { /* whoops this fd already released, cleanup */ |
1385 | SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n")); | 1309 | SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n")); |
1386 | sg_finish_rem_req(srp); | 1310 | sg_finish_rem_req(srp); |
@@ -1431,6 +1355,7 @@ static int sg_sysfs_valid = 0; | |||
1431 | 1355 | ||
1432 | static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) | 1356 | static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) |
1433 | { | 1357 | { |
1358 | struct request_queue *q = scsidp->request_queue; | ||
1434 | Sg_device *sdp; | 1359 | Sg_device *sdp; |
1435 | unsigned long iflags; | 1360 | unsigned long iflags; |
1436 | void *old_sg_dev_arr = NULL; | 1361 | void *old_sg_dev_arr = NULL; |
@@ -1473,7 +1398,7 @@ static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) | |||
1473 | sdp->disk = disk; | 1398 | sdp->disk = disk; |
1474 | sdp->device = scsidp; | 1399 | sdp->device = scsidp; |
1475 | init_waitqueue_head(&sdp->o_excl_wait); | 1400 | init_waitqueue_head(&sdp->o_excl_wait); |
1476 | sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0; | 1401 | sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); |
1477 | 1402 | ||
1478 | sg_nr_dev++; | 1403 | sg_nr_dev++; |
1479 | sg_dev_arr[k] = sdp; | 1404 | sg_dev_arr[k] = sdp; |
@@ -1753,36 +1678,35 @@ sg_finish_rem_req(Sg_request * srp) | |||
1753 | static int | 1678 | static int |
1754 | sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) | 1679 | sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) |
1755 | { | 1680 | { |
1756 | int ret_sz; | 1681 | int sg_bufflen = tablesize * sizeof(struct scatterlist); |
1757 | int elem_sz = sizeof (struct scatterlist); | 1682 | unsigned int gfp_flags = GFP_ATOMIC | __GFP_NOWARN; |
1758 | int sg_bufflen = tablesize * elem_sz; | ||
1759 | int mx_sc_elems = tablesize; | ||
1760 | 1683 | ||
1761 | schp->buffer = sg_page_malloc(sg_bufflen, sfp->low_dma, &ret_sz); | 1684 | /* |
1685 | * TODO: test without low_dma, we should not need it since | ||
1686 | * the block layer will bounce the buffer for us | ||
1687 | * | ||
1688 | * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list. | ||
1689 | */ | ||
1690 | if (sfp->low_dma) | ||
1691 | gfp_flags |= GFP_DMA; | ||
1692 | schp->buffer = kzalloc(sg_bufflen, gfp_flags); | ||
1762 | if (!schp->buffer) | 1693 | if (!schp->buffer) |
1763 | return -ENOMEM; | 1694 | return -ENOMEM; |
1764 | else if (ret_sz != sg_bufflen) { | ||
1765 | sg_bufflen = ret_sz; | ||
1766 | mx_sc_elems = sg_bufflen / elem_sz; | ||
1767 | } | ||
1768 | schp->sglist_len = sg_bufflen; | 1695 | schp->sglist_len = sg_bufflen; |
1769 | memset(schp->buffer, 0, sg_bufflen); | 1696 | return tablesize; /* number of scat_gath elements allocated */ |
1770 | return mx_sc_elems; /* number of scat_gath elements allocated */ | ||
1771 | } | 1697 | } |
1772 | 1698 | ||
1773 | #ifdef SG_ALLOW_DIO_CODE | 1699 | #ifdef SG_ALLOW_DIO_CODE |
1774 | /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */ | 1700 | /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */ |
1775 | /* hopefully this generic code will moved to a library */ | 1701 | /* TODO: hopefully we can use the generic block layer code */ |
1776 | 1702 | ||
1777 | /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if | 1703 | /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if |
1778 | - mapping of all pages not successful | 1704 | - mapping of all pages not successful |
1779 | - any page is above max_pfn | ||
1780 | (i.e., either completely successful or fails) | 1705 | (i.e., either completely successful or fails) |
1781 | */ | 1706 | */ |
1782 | static int | 1707 | static int |
1783 | st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, | 1708 | st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, |
1784 | unsigned long uaddr, size_t count, int rw, | 1709 | unsigned long uaddr, size_t count, int rw) |
1785 | unsigned long max_pfn) | ||
1786 | { | 1710 | { |
1787 | unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1711 | unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1788 | unsigned long start = uaddr >> PAGE_SHIFT; | 1712 | unsigned long start = uaddr >> PAGE_SHIFT; |
@@ -1828,21 +1752,17 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, | |||
1828 | * probably wrong function for rw==WRITE | 1752 | * probably wrong function for rw==WRITE |
1829 | */ | 1753 | */ |
1830 | flush_dcache_page(pages[i]); | 1754 | flush_dcache_page(pages[i]); |
1831 | if (page_to_pfn(pages[i]) > max_pfn) | ||
1832 | goto out_unlock; | ||
1833 | /* ?? Is locking needed? I don't think so */ | 1755 | /* ?? Is locking needed? I don't think so */ |
1834 | /* if (TestSetPageLocked(pages[i])) | 1756 | /* if (TestSetPageLocked(pages[i])) |
1835 | goto out_unlock; */ | 1757 | goto out_unlock; */ |
1836 | } | 1758 | } |
1837 | 1759 | ||
1838 | /* Populate the scatter/gather list */ | 1760 | sgl[0].page = pages[0]; |
1839 | sgl[0].page = pages[0]; | ||
1840 | sgl[0].offset = uaddr & ~PAGE_MASK; | 1761 | sgl[0].offset = uaddr & ~PAGE_MASK; |
1841 | if (nr_pages > 1) { | 1762 | if (nr_pages > 1) { |
1842 | sgl[0].length = PAGE_SIZE - sgl[0].offset; | 1763 | sgl[0].length = PAGE_SIZE - sgl[0].offset; |
1843 | count -= sgl[0].length; | 1764 | count -= sgl[0].length; |
1844 | for (i=1; i < nr_pages ; i++) { | 1765 | for (i=1; i < nr_pages ; i++) { |
1845 | sgl[i].offset = 0; | ||
1846 | sgl[i].page = pages[i]; | 1766 | sgl[i].page = pages[i]; |
1847 | sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; | 1767 | sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; |
1848 | count -= PAGE_SIZE; | 1768 | count -= PAGE_SIZE; |
@@ -1855,10 +1775,6 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, | |||
1855 | kfree(pages); | 1775 | kfree(pages); |
1856 | return nr_pages; | 1776 | return nr_pages; |
1857 | 1777 | ||
1858 | out_unlock: | ||
1859 | /* for (j=0; j < i; j++) | ||
1860 | unlock_page(pages[j]); */ | ||
1861 | res = 0; | ||
1862 | out_unmap: | 1778 | out_unmap: |
1863 | if (res > 0) { | 1779 | if (res > 0) { |
1864 | for (j=0; j < res; j++) | 1780 | for (j=0; j < res; j++) |
@@ -1904,20 +1820,20 @@ sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len) | |||
1904 | sg_io_hdr_t *hp = &srp->header; | 1820 | sg_io_hdr_t *hp = &srp->header; |
1905 | Sg_scatter_hold *schp = &srp->data; | 1821 | Sg_scatter_hold *schp = &srp->data; |
1906 | int sg_tablesize = sfp->parentdp->sg_tablesize; | 1822 | int sg_tablesize = sfp->parentdp->sg_tablesize; |
1907 | struct scatterlist *sgl; | ||
1908 | int mx_sc_elems, res; | 1823 | int mx_sc_elems, res; |
1909 | struct scsi_device *sdev = sfp->parentdp->device; | 1824 | struct scsi_device *sdev = sfp->parentdp->device; |
1910 | 1825 | ||
1911 | if (((unsigned long)hp->dxferp & | 1826 | if (((unsigned long)hp->dxferp & |
1912 | queue_dma_alignment(sdev->request_queue)) != 0) | 1827 | queue_dma_alignment(sdev->request_queue)) != 0) |
1913 | return 1; | 1828 | return 1; |
1829 | |||
1914 | mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); | 1830 | mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); |
1915 | if (mx_sc_elems <= 0) { | 1831 | if (mx_sc_elems <= 0) { |
1916 | return 1; | 1832 | return 1; |
1917 | } | 1833 | } |
1918 | sgl = (struct scatterlist *)schp->buffer; | 1834 | res = st_map_user_pages(schp->buffer, mx_sc_elems, |
1919 | res = st_map_user_pages(sgl, mx_sc_elems, (unsigned long)hp->dxferp, dxfer_len, | 1835 | (unsigned long)hp->dxferp, dxfer_len, |
1920 | (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0, ULONG_MAX); | 1836 | (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0); |
1921 | if (res <= 0) | 1837 | if (res <= 0) |
1922 | return 1; | 1838 | return 1; |
1923 | schp->k_use_sg = res; | 1839 | schp->k_use_sg = res; |
@@ -1932,9 +1848,11 @@ sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len) | |||
1932 | static int | 1848 | static int |
1933 | sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | 1849 | sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) |
1934 | { | 1850 | { |
1935 | int ret_sz; | 1851 | struct scatterlist *sg; |
1852 | int ret_sz = 0, k, rem_sz, num, mx_sc_elems; | ||
1853 | int sg_tablesize = sfp->parentdp->sg_tablesize; | ||
1936 | int blk_size = buff_size; | 1854 | int blk_size = buff_size; |
1937 | unsigned char *p = NULL; | 1855 | struct page *p = NULL; |
1938 | 1856 | ||
1939 | if ((blk_size < 0) || (!sfp)) | 1857 | if ((blk_size < 0) || (!sfp)) |
1940 | return -EFAULT; | 1858 | return -EFAULT; |
@@ -1944,59 +1862,35 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1944 | blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK); | 1862 | blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK); |
1945 | SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n", | 1863 | SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n", |
1946 | buff_size, blk_size)); | 1864 | buff_size, blk_size)); |
1947 | if (blk_size <= SG_SCATTER_SZ) { | 1865 | |
1948 | p = sg_page_malloc(blk_size, sfp->low_dma, &ret_sz); | 1866 | /* N.B. ret_sz carried into this block ... */ |
1949 | if (!p) | 1867 | mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); |
1950 | return -ENOMEM; | 1868 | if (mx_sc_elems < 0) |
1951 | if (blk_size == ret_sz) { /* got it on the first attempt */ | 1869 | return mx_sc_elems; /* most likely -ENOMEM */ |
1952 | schp->k_use_sg = 0; | 1870 | |
1953 | schp->buffer = p; | 1871 | for (k = 0, sg = schp->buffer, rem_sz = blk_size; |
1954 | schp->bufflen = blk_size; | 1872 | (rem_sz > 0) && (k < mx_sc_elems); |
1955 | schp->b_malloc_len = blk_size; | 1873 | ++k, rem_sz -= ret_sz, ++sg) { |
1956 | return 0; | 1874 | |
1957 | } | 1875 | num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz; |
1958 | } else { | 1876 | p = sg_page_malloc(num, sfp->low_dma, &ret_sz); |
1959 | p = sg_page_malloc(SG_SCATTER_SZ, sfp->low_dma, &ret_sz); | ||
1960 | if (!p) | 1877 | if (!p) |
1961 | return -ENOMEM; | 1878 | return -ENOMEM; |
1962 | } | 1879 | |
1963 | /* Want some local declarations, so start new block ... */ | 1880 | sg->page = p; |
1964 | { /* lets try and build a scatter gather list */ | 1881 | sg->length = ret_sz; |
1965 | struct scatterlist *sclp; | 1882 | |
1966 | int k, rem_sz, num; | 1883 | SCSI_LOG_TIMEOUT(5, printk("sg_build_build: k=%d, a=0x%p, len=%d\n", |
1967 | int mx_sc_elems; | 1884 | k, p, ret_sz)); |
1968 | int sg_tablesize = sfp->parentdp->sg_tablesize; | 1885 | } /* end of for loop */ |
1969 | int first = 1; | 1886 | |
1970 | 1887 | schp->k_use_sg = k; | |
1971 | /* N.B. ret_sz carried into this block ... */ | 1888 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); |
1972 | mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); | 1889 | |
1973 | if (mx_sc_elems < 0) | 1890 | schp->bufflen = blk_size; |
1974 | return mx_sc_elems; /* most likely -ENOMEM */ | 1891 | if (rem_sz > 0) /* must have failed */ |
1975 | 1892 | return -ENOMEM; | |
1976 | for (k = 0, sclp = schp->buffer, rem_sz = blk_size; | 1893 | |
1977 | (rem_sz > 0) && (k < mx_sc_elems); | ||
1978 | ++k, rem_sz -= ret_sz, ++sclp) { | ||
1979 | if (first) | ||
1980 | first = 0; | ||
1981 | else { | ||
1982 | num = | ||
1983 | (rem_sz > | ||
1984 | SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz; | ||
1985 | p = sg_page_malloc(num, sfp->low_dma, &ret_sz); | ||
1986 | if (!p) | ||
1987 | break; | ||
1988 | } | ||
1989 | sg_set_buf(sclp, p, ret_sz); | ||
1990 | |||
1991 | SCSI_LOG_TIMEOUT(5, printk("sg_build_build: k=%d, a=0x%p, len=%d\n", | ||
1992 | k, sg_scatg2virt(sclp), ret_sz)); | ||
1993 | } /* end of for loop */ | ||
1994 | schp->k_use_sg = k; | ||
1995 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz)); | ||
1996 | schp->bufflen = blk_size; | ||
1997 | if (rem_sz > 0) /* must have failed */ | ||
1998 | return -ENOMEM; | ||
1999 | } | ||
2000 | return 0; | 1894 | return 0; |
2001 | } | 1895 | } |
2002 | 1896 | ||
@@ -2005,6 +1899,7 @@ sg_write_xfer(Sg_request * srp) | |||
2005 | { | 1899 | { |
2006 | sg_io_hdr_t *hp = &srp->header; | 1900 | sg_io_hdr_t *hp = &srp->header; |
2007 | Sg_scatter_hold *schp = &srp->data; | 1901 | Sg_scatter_hold *schp = &srp->data; |
1902 | struct scatterlist *sg = schp->buffer; | ||
2008 | int num_xfer = 0; | 1903 | int num_xfer = 0; |
2009 | int j, k, onum, usglen, ksglen, res; | 1904 | int j, k, onum, usglen, ksglen, res; |
2010 | int iovec_count = (int) hp->iovec_count; | 1905 | int iovec_count = (int) hp->iovec_count; |
@@ -2033,63 +1928,45 @@ sg_write_xfer(Sg_request * srp) | |||
2033 | } else | 1928 | } else |
2034 | onum = 1; | 1929 | onum = 1; |
2035 | 1930 | ||
2036 | if (0 == schp->k_use_sg) { /* kernel has single buffer */ | 1931 | ksglen = sg->length; |
2037 | for (j = 0, p = schp->buffer; j < onum; ++j) { | 1932 | p = page_address(sg->page); |
2038 | res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); | 1933 | for (j = 0, k = 0; j < onum; ++j) { |
2039 | if (res) | 1934 | res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); |
2040 | return res; | 1935 | if (res) |
2041 | usglen = (num_xfer > usglen) ? usglen : num_xfer; | 1936 | return res; |
2042 | if (__copy_from_user(p, up, usglen)) | 1937 | |
2043 | return -EFAULT; | 1938 | for (; p; ++sg, ksglen = sg->length, |
2044 | p += usglen; | 1939 | p = page_address(sg->page)) { |
2045 | num_xfer -= usglen; | 1940 | if (usglen <= 0) |
2046 | if (num_xfer <= 0) | 1941 | break; |
2047 | return 0; | 1942 | if (ksglen > usglen) { |
2048 | } | 1943 | if (usglen >= num_xfer) { |
2049 | } else { /* kernel using scatter gather list */ | 1944 | if (__copy_from_user(p, up, num_xfer)) |
2050 | struct scatterlist *sclp = (struct scatterlist *) schp->buffer; | ||
2051 | |||
2052 | ksglen = (int) sclp->length; | ||
2053 | p = sg_scatg2virt(sclp); | ||
2054 | for (j = 0, k = 0; j < onum; ++j) { | ||
2055 | res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); | ||
2056 | if (res) | ||
2057 | return res; | ||
2058 | |||
2059 | for (; p; ++sclp, ksglen = (int) sclp->length, | ||
2060 | p = sg_scatg2virt(sclp)) { | ||
2061 | if (usglen <= 0) | ||
2062 | break; | ||
2063 | if (ksglen > usglen) { | ||
2064 | if (usglen >= num_xfer) { | ||
2065 | if (__copy_from_user | ||
2066 | (p, up, num_xfer)) | ||
2067 | return -EFAULT; | ||
2068 | return 0; | ||
2069 | } | ||
2070 | if (__copy_from_user(p, up, usglen)) | ||
2071 | return -EFAULT; | ||
2072 | p += usglen; | ||
2073 | ksglen -= usglen; | ||
2074 | break; | ||
2075 | } else { | ||
2076 | if (ksglen >= num_xfer) { | ||
2077 | if (__copy_from_user | ||
2078 | (p, up, num_xfer)) | ||
2079 | return -EFAULT; | ||
2080 | return 0; | ||
2081 | } | ||
2082 | if (__copy_from_user(p, up, ksglen)) | ||
2083 | return -EFAULT; | 1945 | return -EFAULT; |
2084 | up += ksglen; | 1946 | return 0; |
2085 | usglen -= ksglen; | ||
2086 | } | 1947 | } |
2087 | ++k; | 1948 | if (__copy_from_user(p, up, usglen)) |
2088 | if (k >= schp->k_use_sg) | 1949 | return -EFAULT; |
1950 | p += usglen; | ||
1951 | ksglen -= usglen; | ||
1952 | break; | ||
1953 | } else { | ||
1954 | if (ksglen >= num_xfer) { | ||
1955 | if (__copy_from_user(p, up, num_xfer)) | ||
1956 | return -EFAULT; | ||
2089 | return 0; | 1957 | return 0; |
1958 | } | ||
1959 | if (__copy_from_user(p, up, ksglen)) | ||
1960 | return -EFAULT; | ||
1961 | up += ksglen; | ||
1962 | usglen -= ksglen; | ||
2090 | } | 1963 | } |
1964 | ++k; | ||
1965 | if (k >= schp->k_use_sg) | ||
1966 | return 0; | ||
2091 | } | 1967 | } |
2092 | } | 1968 | } |
1969 | |||
2093 | return 0; | 1970 | return 0; |
2094 | } | 1971 | } |
2095 | 1972 | ||
@@ -2127,29 +2004,25 @@ sg_remove_scat(Sg_scatter_hold * schp) | |||
2127 | { | 2004 | { |
2128 | SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); | 2005 | SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); |
2129 | if (schp->buffer && (schp->sglist_len > 0)) { | 2006 | if (schp->buffer && (schp->sglist_len > 0)) { |
2130 | struct scatterlist *sclp = (struct scatterlist *) schp->buffer; | 2007 | struct scatterlist *sg = schp->buffer; |
2131 | 2008 | ||
2132 | if (schp->dio_in_use) { | 2009 | if (schp->dio_in_use) { |
2133 | #ifdef SG_ALLOW_DIO_CODE | 2010 | #ifdef SG_ALLOW_DIO_CODE |
2134 | st_unmap_user_pages(sclp, schp->k_use_sg, TRUE); | 2011 | st_unmap_user_pages(sg, schp->k_use_sg, TRUE); |
2135 | #endif | 2012 | #endif |
2136 | } else { | 2013 | } else { |
2137 | int k; | 2014 | int k; |
2138 | 2015 | ||
2139 | for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp); | 2016 | for (k = 0; (k < schp->k_use_sg) && sg->page; |
2140 | ++k, ++sclp) { | 2017 | ++k, ++sg) { |
2141 | SCSI_LOG_TIMEOUT(5, printk( | 2018 | SCSI_LOG_TIMEOUT(5, printk( |
2142 | "sg_remove_scat: k=%d, a=0x%p, len=%d\n", | 2019 | "sg_remove_scat: k=%d, a=0x%p, len=%d\n", |
2143 | k, sg_scatg2virt(sclp), sclp->length)); | 2020 | k, sg->page, sg->length)); |
2144 | sg_page_free(sg_scatg2virt(sclp), sclp->length); | 2021 | sg_page_free(sg->page, sg->length); |
2145 | sclp->page = NULL; | ||
2146 | sclp->offset = 0; | ||
2147 | sclp->length = 0; | ||
2148 | } | 2022 | } |
2149 | } | 2023 | } |
2150 | sg_page_free(schp->buffer, schp->sglist_len); | 2024 | kfree(schp->buffer); |
2151 | } else if (schp->buffer) | 2025 | } |
2152 | sg_page_free(schp->buffer, schp->b_malloc_len); | ||
2153 | memset(schp, 0, sizeof (*schp)); | 2026 | memset(schp, 0, sizeof (*schp)); |
2154 | } | 2027 | } |
2155 | 2028 | ||
@@ -2158,6 +2031,7 @@ sg_read_xfer(Sg_request * srp) | |||
2158 | { | 2031 | { |
2159 | sg_io_hdr_t *hp = &srp->header; | 2032 | sg_io_hdr_t *hp = &srp->header; |
2160 | Sg_scatter_hold *schp = &srp->data; | 2033 | Sg_scatter_hold *schp = &srp->data; |
2034 | struct scatterlist *sg = schp->buffer; | ||
2161 | int num_xfer = 0; | 2035 | int num_xfer = 0; |
2162 | int j, k, onum, usglen, ksglen, res; | 2036 | int j, k, onum, usglen, ksglen, res; |
2163 | int iovec_count = (int) hp->iovec_count; | 2037 | int iovec_count = (int) hp->iovec_count; |
@@ -2186,63 +2060,45 @@ sg_read_xfer(Sg_request * srp) | |||
2186 | } else | 2060 | } else |
2187 | onum = 1; | 2061 | onum = 1; |
2188 | 2062 | ||
2189 | if (0 == schp->k_use_sg) { /* kernel has single buffer */ | 2063 | p = page_address(sg->page); |
2190 | for (j = 0, p = schp->buffer; j < onum; ++j) { | 2064 | ksglen = sg->length; |
2191 | res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); | 2065 | for (j = 0, k = 0; j < onum; ++j) { |
2192 | if (res) | 2066 | res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); |
2193 | return res; | 2067 | if (res) |
2194 | usglen = (num_xfer > usglen) ? usglen : num_xfer; | 2068 | return res; |
2195 | if (__copy_to_user(up, p, usglen)) | 2069 | |
2196 | return -EFAULT; | 2070 | for (; p; ++sg, ksglen = sg->length, |
2197 | p += usglen; | 2071 | p = page_address(sg->page)) { |
2198 | num_xfer -= usglen; | 2072 | if (usglen <= 0) |
2199 | if (num_xfer <= 0) | 2073 | break; |
2200 | return 0; | 2074 | if (ksglen > usglen) { |
2201 | } | 2075 | if (usglen >= num_xfer) { |
2202 | } else { /* kernel using scatter gather list */ | 2076 | if (__copy_to_user(up, p, num_xfer)) |
2203 | struct scatterlist *sclp = (struct scatterlist *) schp->buffer; | ||
2204 | |||
2205 | ksglen = (int) sclp->length; | ||
2206 | p = sg_scatg2virt(sclp); | ||
2207 | for (j = 0, k = 0; j < onum; ++j) { | ||
2208 | res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); | ||
2209 | if (res) | ||
2210 | return res; | ||
2211 | |||
2212 | for (; p; ++sclp, ksglen = (int) sclp->length, | ||
2213 | p = sg_scatg2virt(sclp)) { | ||
2214 | if (usglen <= 0) | ||
2215 | break; | ||
2216 | if (ksglen > usglen) { | ||
2217 | if (usglen >= num_xfer) { | ||
2218 | if (__copy_to_user | ||
2219 | (up, p, num_xfer)) | ||
2220 | return -EFAULT; | ||
2221 | return 0; | ||
2222 | } | ||
2223 | if (__copy_to_user(up, p, usglen)) | ||
2224 | return -EFAULT; | ||
2225 | p += usglen; | ||
2226 | ksglen -= usglen; | ||
2227 | break; | ||
2228 | } else { | ||
2229 | if (ksglen >= num_xfer) { | ||
2230 | if (__copy_to_user | ||
2231 | (up, p, num_xfer)) | ||
2232 | return -EFAULT; | ||
2233 | return 0; | ||
2234 | } | ||
2235 | if (__copy_to_user(up, p, ksglen)) | ||
2236 | return -EFAULT; | 2077 | return -EFAULT; |
2237 | up += ksglen; | 2078 | return 0; |
2238 | usglen -= ksglen; | ||
2239 | } | 2079 | } |
2240 | ++k; | 2080 | if (__copy_to_user(up, p, usglen)) |
2241 | if (k >= schp->k_use_sg) | 2081 | return -EFAULT; |
2082 | p += usglen; | ||
2083 | ksglen -= usglen; | ||
2084 | break; | ||
2085 | } else { | ||
2086 | if (ksglen >= num_xfer) { | ||
2087 | if (__copy_to_user(up, p, num_xfer)) | ||
2088 | return -EFAULT; | ||
2242 | return 0; | 2089 | return 0; |
2090 | } | ||
2091 | if (__copy_to_user(up, p, ksglen)) | ||
2092 | return -EFAULT; | ||
2093 | up += ksglen; | ||
2094 | usglen -= ksglen; | ||
2243 | } | 2095 | } |
2096 | ++k; | ||
2097 | if (k >= schp->k_use_sg) | ||
2098 | return 0; | ||
2244 | } | 2099 | } |
2245 | } | 2100 | } |
2101 | |||
2246 | return 0; | 2102 | return 0; |
2247 | } | 2103 | } |
2248 | 2104 | ||
@@ -2250,37 +2106,32 @@ static int | |||
2250 | sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) | 2106 | sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) |
2251 | { | 2107 | { |
2252 | Sg_scatter_hold *schp = &srp->data; | 2108 | Sg_scatter_hold *schp = &srp->data; |
2109 | struct scatterlist *sg = schp->buffer; | ||
2110 | int k, num; | ||
2253 | 2111 | ||
2254 | SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", | 2112 | SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", |
2255 | num_read_xfer)); | 2113 | num_read_xfer)); |
2256 | if ((!outp) || (num_read_xfer <= 0)) | 2114 | if ((!outp) || (num_read_xfer <= 0)) |
2257 | return 0; | 2115 | return 0; |
2258 | if (schp->k_use_sg > 0) { | 2116 | |
2259 | int k, num; | 2117 | for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) { |
2260 | struct scatterlist *sclp = (struct scatterlist *) schp->buffer; | 2118 | num = sg->length; |
2261 | 2119 | if (num > num_read_xfer) { | |
2262 | for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp); | 2120 | if (__copy_to_user(outp, page_address(sg->page), |
2263 | ++k, ++sclp) { | 2121 | num_read_xfer)) |
2264 | num = (int) sclp->length; | 2122 | return -EFAULT; |
2265 | if (num > num_read_xfer) { | 2123 | break; |
2266 | if (__copy_to_user | 2124 | } else { |
2267 | (outp, sg_scatg2virt(sclp), num_read_xfer)) | 2125 | if (__copy_to_user(outp, page_address(sg->page), |
2268 | return -EFAULT; | 2126 | num)) |
2127 | return -EFAULT; | ||
2128 | num_read_xfer -= num; | ||
2129 | if (num_read_xfer <= 0) | ||
2269 | break; | 2130 | break; |
2270 | } else { | 2131 | outp += num; |
2271 | if (__copy_to_user | ||
2272 | (outp, sg_scatg2virt(sclp), num)) | ||
2273 | return -EFAULT; | ||
2274 | num_read_xfer -= num; | ||
2275 | if (num_read_xfer <= 0) | ||
2276 | break; | ||
2277 | outp += num; | ||
2278 | } | ||
2279 | } | 2132 | } |
2280 | } else { | ||
2281 | if (__copy_to_user(outp, schp->buffer, num_read_xfer)) | ||
2282 | return -EFAULT; | ||
2283 | } | 2133 | } |
2134 | |||
2284 | return 0; | 2135 | return 0; |
2285 | } | 2136 | } |
2286 | 2137 | ||
@@ -2306,44 +2157,31 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) | |||
2306 | { | 2157 | { |
2307 | Sg_scatter_hold *req_schp = &srp->data; | 2158 | Sg_scatter_hold *req_schp = &srp->data; |
2308 | Sg_scatter_hold *rsv_schp = &sfp->reserve; | 2159 | Sg_scatter_hold *rsv_schp = &sfp->reserve; |
2160 | struct scatterlist *sg = rsv_schp->buffer; | ||
2161 | int k, num, rem; | ||
2309 | 2162 | ||
2310 | srp->res_used = 1; | 2163 | srp->res_used = 1; |
2311 | SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); | 2164 | SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); |
2312 | size = (size + 1) & (~1); /* round to even for aha1542 */ | 2165 | rem = size = (size + 1) & (~1); /* round to even for aha1542 */ |
2313 | if (rsv_schp->k_use_sg > 0) { | 2166 | |
2314 | int k, num; | 2167 | for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) { |
2315 | int rem = size; | 2168 | num = sg->length; |
2316 | struct scatterlist *sclp = | 2169 | if (rem <= num) { |
2317 | (struct scatterlist *) rsv_schp->buffer; | 2170 | sfp->save_scat_len = num; |
2318 | 2171 | sg->length = rem; | |
2319 | for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) { | 2172 | req_schp->k_use_sg = k + 1; |
2320 | num = (int) sclp->length; | 2173 | req_schp->sglist_len = rsv_schp->sglist_len; |
2321 | if (rem <= num) { | 2174 | req_schp->buffer = rsv_schp->buffer; |
2322 | if (0 == k) { | 2175 | |
2323 | req_schp->k_use_sg = 0; | 2176 | req_schp->bufflen = size; |
2324 | req_schp->buffer = sg_scatg2virt(sclp); | 2177 | req_schp->b_malloc_len = rsv_schp->b_malloc_len; |
2325 | } else { | 2178 | break; |
2326 | sfp->save_scat_len = num; | 2179 | } else |
2327 | sclp->length = (unsigned) rem; | 2180 | rem -= num; |
2328 | req_schp->k_use_sg = k + 1; | ||
2329 | req_schp->sglist_len = | ||
2330 | rsv_schp->sglist_len; | ||
2331 | req_schp->buffer = rsv_schp->buffer; | ||
2332 | } | ||
2333 | req_schp->bufflen = size; | ||
2334 | req_schp->b_malloc_len = rsv_schp->b_malloc_len; | ||
2335 | break; | ||
2336 | } else | ||
2337 | rem -= num; | ||
2338 | } | ||
2339 | if (k >= rsv_schp->k_use_sg) | ||
2340 | SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n")); | ||
2341 | } else { | ||
2342 | req_schp->k_use_sg = 0; | ||
2343 | req_schp->bufflen = size; | ||
2344 | req_schp->buffer = rsv_schp->buffer; | ||
2345 | req_schp->b_malloc_len = rsv_schp->b_malloc_len; | ||
2346 | } | 2181 | } |
2182 | |||
2183 | if (k >= rsv_schp->k_use_sg) | ||
2184 | SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n")); | ||
2347 | } | 2185 | } |
2348 | 2186 | ||
2349 | static void | 2187 | static void |
@@ -2355,11 +2193,10 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) | |||
2355 | SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", | 2193 | SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", |
2356 | (int) req_schp->k_use_sg)); | 2194 | (int) req_schp->k_use_sg)); |
2357 | if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) { | 2195 | if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) { |
2358 | struct scatterlist *sclp = | 2196 | struct scatterlist *sg = rsv_schp->buffer; |
2359 | (struct scatterlist *) rsv_schp->buffer; | ||
2360 | 2197 | ||
2361 | if (sfp->save_scat_len > 0) | 2198 | if (sfp->save_scat_len > 0) |
2362 | (sclp + (req_schp->k_use_sg - 1))->length = | 2199 | (sg + (req_schp->k_use_sg - 1))->length = |
2363 | (unsigned) sfp->save_scat_len; | 2200 | (unsigned) sfp->save_scat_len; |
2364 | else | 2201 | else |
2365 | SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n")); | 2202 | SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n")); |
@@ -2445,7 +2282,6 @@ sg_add_request(Sg_fd * sfp) | |||
2445 | if (resp) { | 2282 | if (resp) { |
2446 | resp->nextrp = NULL; | 2283 | resp->nextrp = NULL; |
2447 | resp->header.duration = jiffies_to_msecs(jiffies); | 2284 | resp->header.duration = jiffies_to_msecs(jiffies); |
2448 | resp->my_cmdp = NULL; | ||
2449 | } | 2285 | } |
2450 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); | 2286 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2451 | return resp; | 2287 | return resp; |
@@ -2463,8 +2299,6 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp) | |||
2463 | if ((!sfp) || (!srp) || (!sfp->headrp)) | 2299 | if ((!sfp) || (!srp) || (!sfp->headrp)) |
2464 | return res; | 2300 | return res; |
2465 | write_lock_irqsave(&sfp->rq_list_lock, iflags); | 2301 | write_lock_irqsave(&sfp->rq_list_lock, iflags); |
2466 | if (srp->my_cmdp) | ||
2467 | srp->my_cmdp->upper_private_data = NULL; | ||
2468 | prev_rp = sfp->headrp; | 2302 | prev_rp = sfp->headrp; |
2469 | if (srp == prev_rp) { | 2303 | if (srp == prev_rp) { |
2470 | sfp->headrp = prev_rp->nextrp; | 2304 | sfp->headrp = prev_rp->nextrp; |
@@ -2507,10 +2341,10 @@ sg_add_sfp(Sg_device * sdp, int dev) | |||
2507 | Sg_fd *sfp; | 2341 | Sg_fd *sfp; |
2508 | unsigned long iflags; | 2342 | unsigned long iflags; |
2509 | 2343 | ||
2510 | sfp = (Sg_fd *) sg_page_malloc(sizeof (Sg_fd), 0, NULL); | 2344 | sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); |
2511 | if (!sfp) | 2345 | if (!sfp) |
2512 | return NULL; | 2346 | return NULL; |
2513 | memset(sfp, 0, sizeof (Sg_fd)); | 2347 | |
2514 | init_waitqueue_head(&sfp->read_wait); | 2348 | init_waitqueue_head(&sfp->read_wait); |
2515 | rwlock_init(&sfp->rq_list_lock); | 2349 | rwlock_init(&sfp->rq_list_lock); |
2516 | 2350 | ||
@@ -2567,7 +2401,7 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) | |||
2567 | } | 2401 | } |
2568 | sfp->parentdp = NULL; | 2402 | sfp->parentdp = NULL; |
2569 | SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp)); | 2403 | SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp)); |
2570 | sg_page_free((char *) sfp, sizeof (Sg_fd)); | 2404 | kfree(sfp); |
2571 | } | 2405 | } |
2572 | 2406 | ||
2573 | /* Returns 0 in normal case, 1 when detached and sdp object removed */ | 2407 | /* Returns 0 in normal case, 1 when detached and sdp object removed */ |
@@ -2632,10 +2466,10 @@ sg_res_in_use(Sg_fd * sfp) | |||
2632 | } | 2466 | } |
2633 | 2467 | ||
2634 | /* If retSzp==NULL want exact size or fail */ | 2468 | /* If retSzp==NULL want exact size or fail */ |
2635 | static char * | 2469 | static struct page * |
2636 | sg_page_malloc(int rqSz, int lowDma, int *retSzp) | 2470 | sg_page_malloc(int rqSz, int lowDma, int *retSzp) |
2637 | { | 2471 | { |
2638 | char *resp = NULL; | 2472 | struct page *resp = NULL; |
2639 | gfp_t page_mask; | 2473 | gfp_t page_mask; |
2640 | int order, a_size; | 2474 | int order, a_size; |
2641 | int resSz = rqSz; | 2475 | int resSz = rqSz; |
@@ -2650,11 +2484,11 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp) | |||
2650 | 2484 | ||
2651 | for (order = 0, a_size = PAGE_SIZE; a_size < rqSz; | 2485 | for (order = 0, a_size = PAGE_SIZE; a_size < rqSz; |
2652 | order++, a_size <<= 1) ; | 2486 | order++, a_size <<= 1) ; |
2653 | resp = (char *) __get_free_pages(page_mask, order); | 2487 | resp = alloc_pages(page_mask, order); |
2654 | while ((!resp) && order && retSzp) { | 2488 | while ((!resp) && order && retSzp) { |
2655 | --order; | 2489 | --order; |
2656 | a_size >>= 1; /* divide by 2, until PAGE_SIZE */ | 2490 | a_size >>= 1; /* divide by 2, until PAGE_SIZE */ |
2657 | resp = (char *) __get_free_pages(page_mask, order); /* try half */ | 2491 | resp = alloc_pages(page_mask, order); /* try half */ |
2658 | resSz = a_size; | 2492 | resSz = a_size; |
2659 | } | 2493 | } |
2660 | if (resp) { | 2494 | if (resp) { |
@@ -2667,15 +2501,15 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp) | |||
2667 | } | 2501 | } |
2668 | 2502 | ||
2669 | static void | 2503 | static void |
2670 | sg_page_free(char *buff, int size) | 2504 | sg_page_free(struct page *page, int size) |
2671 | { | 2505 | { |
2672 | int order, a_size; | 2506 | int order, a_size; |
2673 | 2507 | ||
2674 | if (!buff) | 2508 | if (!page) |
2675 | return; | 2509 | return; |
2676 | for (order = 0, a_size = PAGE_SIZE; a_size < size; | 2510 | for (order = 0, a_size = PAGE_SIZE; a_size < size; |
2677 | order++, a_size <<= 1) ; | 2511 | order++, a_size <<= 1) ; |
2678 | free_pages((unsigned long) buff, order); | 2512 | __free_pages(page, order); |
2679 | } | 2513 | } |
2680 | 2514 | ||
2681 | #ifndef MAINTENANCE_IN_CMD | 2515 | #ifndef MAINTENANCE_IN_CMD |
@@ -3067,13 +2901,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) | |||
3067 | cp = " "; | 2901 | cp = " "; |
3068 | } | 2902 | } |
3069 | seq_printf(s, cp); | 2903 | seq_printf(s, cp); |
3070 | blen = srp->my_cmdp ? | 2904 | blen = srp->data.bufflen; |
3071 | srp->my_cmdp->sr_bufflen : srp->data.bufflen; | 2905 | usg = srp->data.k_use_sg; |
3072 | usg = srp->my_cmdp ? | ||
3073 | srp->my_cmdp->sr_use_sg : srp->data.k_use_sg; | ||
3074 | seq_printf(s, srp->done ? | 2906 | seq_printf(s, srp->done ? |
3075 | ((1 == srp->done) ? "rcv:" : "fin:") | 2907 | ((1 == srp->done) ? "rcv:" : "fin:") |
3076 | : (srp->my_cmdp ? "act:" : "prior:")); | 2908 | : "act:"); |
3077 | seq_printf(s, " id=%d blen=%d", | 2909 | seq_printf(s, " id=%d blen=%d", |
3078 | srp->header.pack_id, blen); | 2910 | srp->header.pack_id, blen); |
3079 | if (srp->done) | 2911 | if (srp->done) |