author     Boaz Harrosh <bharrosh@panasas.com>          2007-10-02 17:18:03 -0400
committer  James Bottomley <jejb@mulgrave.localdomain>  2007-10-12 14:56:13 -0400
commit     3892d88ae6fb186ff6205f764213b556d70800b0 (patch)
tree       5435ca13a3d70569582887813a92f49a0ac7ca7c /drivers/scsi/gdth.c
parent     f842b64e0ffbcc9ce48a3bf799d0b005094107c1 (diff)
[SCSI] gdth: !use_sg cleanup and use of scsi accessors
gdth_execute() issues internal, non-SCSI-standard commands through __gdth_queuecommand(). Since it is not recommended for LLDs to set struct scsi_cmnd IO members directly, gdth now keeps internal IO members of its own. For commands built by gdth_execute() these members are set directly; for commands issued from scsi-ml (via gdth_queuecommand) they are filled from the scsi IO accessors.

* Define gdth IO accessors and use them throughout the driver.
* Use an sg-of-one in gdth_execute() and fix gdth_special_cmd() accordingly.
* Clean up the !use_sg code path and related code.

Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
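The wrapper idea reads roughly like the sketch below: the command's SCp scratch members double as bufflen / sg_count / sglist and are only touched through small inline helpers, with internal commands filling them directly via an sg-of-one. This is a minimal, compilable userspace illustration only; the ex_* names are stand-ins and not the kernel's or this driver's real types (those appear in the diff further down).

/*
 * Sketch of the SCp-based IO accessor pattern (illustrative types only).
 */
#include <stdio.h>

struct ex_scatterlist { void *addr; unsigned len; };

struct ex_scsi_cmnd {
    struct {                               /* stands in for scsi_cmnd.SCp */
        unsigned this_residual;            /* reused as "bufflen"   */
        unsigned buffers_residual;         /* reused as "sg_count"  */
        struct ex_scatterlist *buffer;     /* reused as "sglist"    */
    } SCp;
};

static inline unsigned ex_bufflen(struct ex_scsi_cmnd *cmd)
{
    return cmd->SCp.this_residual;
}

static inline void ex_set_bufflen(struct ex_scsi_cmnd *cmd, unsigned len)
{
    cmd->SCp.this_residual = len;
}

static inline void ex_set_sglist(struct ex_scsi_cmnd *cmd,
                                 struct ex_scatterlist *sg, unsigned count)
{
    cmd->SCp.buffer = sg;
    cmd->SCp.buffers_residual = count;
}

int main(void)
{
    struct ex_scsi_cmnd cmd = { { 0, 0, NULL } };
    char payload[32];
    struct ex_scatterlist one_sg = { payload, sizeof(payload) };

    /* internal-command path: an sg-of-one pointing at the payload buffer */
    ex_set_sglist(&cmd, &one_sg, 1);
    ex_set_bufflen(&cmd, one_sg.len);

    printf("bufflen=%u sg_count=%u\n", ex_bufflen(&cmd),
           cmd.SCp.buffers_residual);
    return 0;
}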
Diffstat (limited to 'drivers/scsi/gdth.c')
-rw-r--r--   drivers/scsi/gdth.c   228
1 file changed, 109 insertions(+), 119 deletions(-)
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index a68004b0d90b..0153d1d8b12e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -85,11 +85,11 @@
 
 /* The meaning of the Scsi_Pointer members in this driver is as follows:
  * ptr: Chaining
- * this_residual: unused
- * buffer: unused
- * dma_handle: will drop in !use_sg patch.
- * buffers_residual: unused
- * Status: DMA mem. mappings (FIXME: drop in !use_sg patch.)
+ * this_residual: gdth_bufflen
+ * buffer: gdth_sglist
+ * dma_handle: unused
+ * buffers_residual: gdth_sg_count
+ * Status: unused
  * Message: unused
  * have_data_in: unused
  * sent_command: unused
@@ -132,6 +132,7 @@
 #include <asm/uaccess.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
+#include <linux/scatterlist.h>
 
 #include "scsi.h"
 #include <scsi/scsi_host.h>
@@ -159,7 +160,7 @@ static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
 static void gdth_clear_events(void);
 
 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
-                                    char *buffer,ushort count);
+                                    char *buffer, ushort count, int to_buffer);
 static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
 static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
 
@@ -373,6 +374,47 @@ static const struct file_operations gdth_fops = {
     .release = gdth_close,
 };
 
+/*
+ * gdth scsi_command access wrappers.
+ * below 6 functions are used throughout the driver to access scsi_command's
+ * io parameters. The reason we do not use the regular accessors from
+ * scsi_cmnd.h is because of gdth_execute(). Since it is unrecommended for
+ * llds to directly set scsi_cmnd's IO members. This driver will use SCp
+ * members for IO parameters, and will copy scsi_cmnd's members to Scp
+ * members in queuecommand. For internal commands through gdth_execute()
+ * SCp's members will be set directly.
+ */
+static inline unsigned gdth_bufflen(struct scsi_cmnd *cmd)
+{
+    return (unsigned)cmd->SCp.this_residual;
+}
+
+static inline void gdth_set_bufflen(struct scsi_cmnd *cmd, unsigned bufflen)
+{
+    cmd->SCp.this_residual = bufflen;
+}
+
+static inline unsigned gdth_sg_count(struct scsi_cmnd *cmd)
+{
+    return (unsigned)cmd->SCp.buffers_residual;
+}
+
+static inline void gdth_set_sg_count(struct scsi_cmnd *cmd, unsigned sg_count)
+{
+    cmd->SCp.buffers_residual = sg_count;
+}
+
+static inline struct scatterlist *gdth_sglist(struct scsi_cmnd *cmd)
+{
+    return cmd->SCp.buffer;
+}
+
+static inline void gdth_set_sglist(struct scsi_cmnd *cmd,
+                                   struct scatterlist *sglist)
+{
+    cmd->SCp.buffer = sglist;
+}
+
 #include "gdth_proc.h"
 #include "gdth_proc.c"
 
@@ -452,6 +494,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
     gdth_ha_str *ha = shost_priv(sdev->host);
     Scsi_Cmnd *scp;
     struct gdth_cmndinfo cmndinfo;
+    struct scatterlist one_sg;
     DECLARE_COMPLETION_ONSTACK(wait);
     int rval;
 
@@ -465,7 +508,10 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
     /* use request field to save the ptr. to completion struct. */
     scp->request = (struct request *)&wait;
     scp->timeout_per_command = timeout*HZ;
-    scp->request_buffer = gdtcmd;
+    sg_init_one(&one_sg, gdtcmd, sizeof(*gdtcmd));
+    gdth_set_sglist(scp, &one_sg);
+    gdth_set_sg_count(scp, 1);
+    gdth_set_bufflen(scp, sizeof(*gdtcmd));
     scp->cmd_len = 12;
     memcpy(scp->cmnd, cmnd, 12);
     cmndinfo.priority = IOCTL_PRI;
@@ -2303,24 +2349,28 @@ static void gdth_next(gdth_ha_str *ha)
                ha->hanum, cmd_index);
     }
 }
 
+/*
+ * gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
+ * buffers, kmap_atomic() as needed.
+ */
 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
-                                    char *buffer,ushort count)
+                                    char *buffer, ushort count, int to_buffer)
 {
-    ushort cpcount,i;
+    ushort cpcount,i, max_sg = gdth_sg_count(scp);
     ushort cpsum,cpnow;
     struct scatterlist *sl;
     char *address;
 
-    cpcount = count<=(ushort)scp->request_bufflen ? count:(ushort)scp->request_bufflen;
+    cpcount = min_t(ushort, count, gdth_bufflen(scp));
 
-    if (scp->use_sg) {
-        sl = (struct scatterlist *)scp->request_buffer;
-        for (i=0,cpsum=0; i<scp->use_sg; ++i,++sl) {
+    if (cpcount) {
+        cpsum=0;
+        scsi_for_each_sg(scp, sl, max_sg, i) {
             unsigned long flags;
             cpnow = (ushort)sl->length;
             TRACE(("copy_internal() now %d sum %d count %d %d\n",
-                   cpnow,cpsum,cpcount,(ushort)scp->bufflen));
+                   cpnow, cpsum, cpcount, gdth_bufflen(scp)));
             if (cpsum+cpnow > cpcount)
                 cpnow = cpcount - cpsum;
             cpsum += cpnow;
@@ -2331,7 +2381,10 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
             }
             local_irq_save(flags);
             address = kmap_atomic(sl->page, KM_BIO_SRC_IRQ) + sl->offset;
-            memcpy(address,buffer,cpnow);
+            if (to_buffer)
+                memcpy(buffer, address, cpnow);
+            else
+                memcpy(address, buffer, cpnow);
             flush_dcache_page(sl->page);
             kunmap_atomic(address, KM_BIO_SRC_IRQ);
             local_irq_restore(flags);
@@ -2339,9 +2392,10 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
                 break;
             buffer += cpnow;
         }
-    } else {
-        TRACE(("copy_internal() count %d\n",cpcount));
-        memcpy((char*)scp->request_buffer,buffer,cpcount);
+    } else if (count) {
+        printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
+               ha->hanum);
+        WARN_ON(1);
     }
 }
 
@@ -2384,7 +2438,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
         strcpy(inq.vendor,ha->oem_name);
         sprintf(inq.product,"Host Drive #%02d",t);
         strcpy(inq.revision," ");
-        gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
+        gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data), 0);
         break;
 
       case REQUEST_SENSE:
@@ -2394,7 +2448,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
         sd.key = NO_SENSE;
         sd.info = 0;
         sd.add_length= 0;
-        gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
+        gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data), 0);
         break;
 
       case MODE_SENSE:
@@ -2406,7 +2460,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
         mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
         mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
         mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
-        gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
+        gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data), 0);
         break;
 
       case READ_CAPACITY:
@@ -2416,7 +2470,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
         else
             rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
         rdc.block_length = cpu_to_be32(SECTOR_SIZE);
-        gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
+        gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data), 0);
         break;
 
       case SERVICE_ACTION_IN:
@@ -2428,7 +2482,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
             rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
             rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
             gdth_copy_internal_data(ha, scp, (char*)&rdc16,
-                                    sizeof(gdth_rdcap16_data));
+                                    sizeof(gdth_rdcap16_data), 0);
         } else {
             scp->result = DID_ABORT << 16;
         }
@@ -2451,13 +2505,9 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
 {
     register gdth_cmd_str *cmdp;
     struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-    struct scatterlist *sl;
     ulong32 cnt, blockcnt;
     ulong64 no, blockno;
-    dma_addr_t phys_addr;
     int i, cmd_index, read_write, sgcnt, mode64;
-    struct page *page;
-    ulong offset;
 
     cmdp = ha->pccb;
     TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
@@ -2546,17 +2596,17 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
             cmdp->u.cache.BlockCnt = blockcnt;
         }
 
-        if (scp->use_sg) {
-            sl = (struct scatterlist *)scp->request_buffer;
-            sgcnt = scp->use_sg;
-            scp->SCp.Status = GDTH_MAP_SG;
+        if (gdth_bufflen(scp)) {
             cmndinfo->dma_dir = (read_write == 1 ?
                                  PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
-            sgcnt = pci_map_sg(ha->pdev, sl, scp->use_sg, cmndinfo->dma_dir);
+            sgcnt = pci_map_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp),
+                               cmndinfo->dma_dir);
             if (mode64) {
+                struct scatterlist *sl;
+
                 cmdp->u.cache64.DestAddr= (ulong64)-1;
                 cmdp->u.cache64.sg_canz = sgcnt;
-                for (i=0; i<sgcnt; ++i,++sl) {
+                scsi_for_each_sg(scp, sl, sgcnt, i) {
                     cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
 #ifdef GDTH_DMA_STATISTICS
                     if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
@@ -2567,9 +2617,11 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
                     cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
                 }
             } else {
+                struct scatterlist *sl;
+
                 cmdp->u.cache.DestAddr= 0xffffffff;
                 cmdp->u.cache.sg_canz = sgcnt;
-                for (i=0; i<sgcnt; ++i,++sl) {
+                scsi_for_each_sg(scp, sl, sgcnt, i) {
                     cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
 #ifdef GDTH_DMA_STATISTICS
                     ha->dma32_cnt++;
@@ -2585,38 +2637,6 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
             }
 #endif
 
-        } else if (scp->request_bufflen) {
-            scp->SCp.Status = GDTH_MAP_SINGLE;
-            cmndinfo->dma_dir = (read_write == 1 ?
-                                 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
-            page = virt_to_page(scp->request_buffer);
-            offset = (ulong)scp->request_buffer & ~PAGE_MASK;
-            phys_addr = pci_map_page(ha->pdev,page,offset,
-                                     scp->request_bufflen, cmndinfo->dma_dir);
-            scp->SCp.dma_handle = phys_addr;
-            if (mode64) {
-                if (ha->cache_feat & SCATTER_GATHER) {
-                    cmdp->u.cache64.DestAddr = (ulong64)-1;
-                    cmdp->u.cache64.sg_canz = 1;
-                    cmdp->u.cache64.sg_lst[0].sg_ptr = phys_addr;
-                    cmdp->u.cache64.sg_lst[0].sg_len = scp->request_bufflen;
-                    cmdp->u.cache64.sg_lst[1].sg_len = 0;
-                } else {
-                    cmdp->u.cache64.DestAddr = phys_addr;
-                    cmdp->u.cache64.sg_canz= 0;
-                }
-            } else {
-                if (ha->cache_feat & SCATTER_GATHER) {
-                    cmdp->u.cache.DestAddr = 0xffffffff;
-                    cmdp->u.cache.sg_canz = 1;
-                    cmdp->u.cache.sg_lst[0].sg_ptr = phys_addr;
-                    cmdp->u.cache.sg_lst[0].sg_len = scp->request_bufflen;
-                    cmdp->u.cache.sg_lst[1].sg_len = 0;
-                } else {
-                    cmdp->u.cache.DestAddr = phys_addr;
-                    cmdp->u.cache.sg_canz= 0;
-                }
-            }
         }
     }
     /* evaluate command size, check space */
@@ -2659,9 +2679,8 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
 static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
 {
     register gdth_cmd_str *cmdp;
-    struct scatterlist *sl;
     ushort i;
-    dma_addr_t phys_addr, sense_paddr;
+    dma_addr_t sense_paddr;
     int cmd_index, sgcnt, mode64;
     unchar t,l;
     struct page *page;
@@ -2727,7 +2746,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
         cmdp->u.raw64.lun = l;
         cmdp->u.raw64.bus = b;
         cmdp->u.raw64.priority = 0;
-        cmdp->u.raw64.sdlen = scp->request_bufflen;
+        cmdp->u.raw64.sdlen = gdth_bufflen(scp);
         cmdp->u.raw64.sense_len = 16;
         cmdp->u.raw64.sense_data = sense_paddr;
         cmdp->u.raw64.direction =
@@ -2744,7 +2763,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
         cmdp->u.raw.bus = b;
         cmdp->u.raw.priority = 0;
         cmdp->u.raw.link_p = 0;
-        cmdp->u.raw.sdlen = scp->request_bufflen;
+        cmdp->u.raw.sdlen = gdth_bufflen(scp);
         cmdp->u.raw.sense_len = 16;
         cmdp->u.raw.sense_data = sense_paddr;
         cmdp->u.raw.direction =
@@ -2753,16 +2772,16 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
         cmdp->u.raw.sg_ranz = 0;
     }
 
-    if (scp->use_sg) {
-        sl = (struct scatterlist *)scp->request_buffer;
-        sgcnt = scp->use_sg;
-        scp->SCp.Status = GDTH_MAP_SG;
+    if (gdth_bufflen(scp)) {
         cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL;
-        sgcnt = pci_map_sg(ha->pdev,sl, scp->use_sg, cmndinfo->dma_dir);
+        sgcnt = pci_map_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp),
+                           cmndinfo->dma_dir);
         if (mode64) {
+            struct scatterlist *sl;
+
             cmdp->u.raw64.sdata = (ulong64)-1;
             cmdp->u.raw64.sg_ranz = sgcnt;
-            for (i=0; i<sgcnt; ++i,++sl) {
+            scsi_for_each_sg(scp, sl, sgcnt, i) {
                 cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
 #ifdef GDTH_DMA_STATISTICS
                 if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
@@ -2773,9 +2792,11 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
                 cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
             }
         } else {
+            struct scatterlist *sl;
+
             cmdp->u.raw.sdata = 0xffffffff;
             cmdp->u.raw.sg_ranz = sgcnt;
-            for (i=0; i<sgcnt; ++i,++sl) {
+            scsi_for_each_sg(scp, sl, sgcnt, i) {
                 cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
 #ifdef GDTH_DMA_STATISTICS
                 ha->dma32_cnt++;
@@ -2791,38 +2812,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
         }
 #endif
 
-    } else if (scp->request_bufflen) {
-        scp->SCp.Status = GDTH_MAP_SINGLE;
-        cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL;
-        page = virt_to_page(scp->request_buffer);
-        offset = (ulong)scp->request_buffer & ~PAGE_MASK;
-        phys_addr = pci_map_page(ha->pdev,page,offset,
-                                 scp->request_bufflen, cmndinfo->dma_dir);
-        scp->SCp.dma_handle = phys_addr;
-
-        if (mode64) {
-            if (ha->raw_feat & SCATTER_GATHER) {
-                cmdp->u.raw64.sdata = (ulong64)-1;
-                cmdp->u.raw64.sg_ranz= 1;
-                cmdp->u.raw64.sg_lst[0].sg_ptr = phys_addr;
-                cmdp->u.raw64.sg_lst[0].sg_len = scp->request_bufflen;
-                cmdp->u.raw64.sg_lst[1].sg_len = 0;
-            } else {
-                cmdp->u.raw64.sdata = phys_addr;
-                cmdp->u.raw64.sg_ranz= 0;
-            }
-        } else {
-            if (ha->raw_feat & SCATTER_GATHER) {
-                cmdp->u.raw.sdata = 0xffffffff;
-                cmdp->u.raw.sg_ranz= 1;
-                cmdp->u.raw.sg_lst[0].sg_ptr = phys_addr;
-                cmdp->u.raw.sg_lst[0].sg_len = scp->request_bufflen;
-                cmdp->u.raw.sg_lst[1].sg_len = 0;
-            } else {
-                cmdp->u.raw.sdata = phys_addr;
-                cmdp->u.raw.sg_ranz= 0;
-            }
-        }
     }
     if (mode64) {
         TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
@@ -2871,7 +2860,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
     if (ha->type==GDT_EISA && ha->cmd_cnt>0)
         return 0;
 
-    memcpy( cmdp, scp->request_buffer, sizeof(gdth_cmd_str));
+    gdth_copy_internal_data(ha, scp, (char *)cmdp, sizeof(gdth_cmd_str), 1);
     cmdp->RequestBuffer = scp;
 
     /* search free command index */
@@ -3404,12 +3393,10 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
             /* retry */
             return 2;
         }
-        if (scp->SCp.Status == GDTH_MAP_SG)
-            pci_unmap_sg(ha->pdev,scp->request_buffer,
-                         scp->use_sg, cmndinfo->dma_dir);
-        else if (scp->SCp.Status == GDTH_MAP_SINGLE)
-            pci_unmap_page(ha->pdev,scp->SCp.dma_handle,
-                           scp->request_bufflen, cmndinfo->dma_dir);
+        if (gdth_bufflen(scp))
+            pci_unmap_sg(ha->pdev, gdth_sglist(scp), gdth_sg_count(scp),
+                         cmndinfo->dma_dir);
+
         if (cmndinfo->sense_paddr)
             pci_unmap_page(ha->pdev, cmndinfo->sense_paddr, 16,
                            PCI_DMA_FROMDEVICE);
@@ -4046,6 +4033,11 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
     scp->scsi_done = done;
     gdth_update_timeout(scp, scp->timeout_per_command * 6);
     cmndinfo->priority = DEFAULT_PRI;
+
+    gdth_set_bufflen(scp, scsi_bufflen(scp));
+    gdth_set_sg_count(scp, scsi_sg_count(scp));
+    gdth_set_sglist(scp, scsi_sglist(scp));
+
     return __gdth_queuecommand(ha, scp, cmndinfo);
 }
 
@@ -4056,7 +4048,6 @@ static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
     cmndinfo->wait_for_completion = 1;
     cmndinfo->phase = -1;
     cmndinfo->OpCode = -1;
-    scp->SCp.Status = GDTH_MAP_NONE;
 
 #ifdef GDTH_STATISTICS
     ++act_ios;
@@ -4626,7 +4617,6 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
             return -ENOMEM;
         scp->device = ha->sdev;
         scp->cmd_len = 12;
-        scp->use_sg = 0;
         scp->device->channel = res.number;
         rval = gdth_eh_bus_reset(scp);
         res.status = (rval == SUCCESS ? S_OK : S_GENERR);