author	Matthew Wilcox <matthew@wil.cx>	2007-07-26 11:41:33 -0400
committer	James Bottomley <jejb@mulgrave.localdomain>	2007-10-12 14:39:22 -0400
commit	47d853ccbe7fc6b79aeddd97cf6f5b08bf81d58b
tree	6e8c7c30b0fb562f9510c649b2dc4b7b0ee23f6c
parent	2ffb45c672eff6a797712c5c8b5a6ddf3692187a
[SCSI] advansys: remove INQUIRY sniffing
Use slave_configure() to do all the work that used to be done in
AscInquiryHandling and AdvInquiryHandling.  Split slave_configure into
two functions, one for wide and one for narrow controllers.  Remove some
unused definitions, duplicate definitions, unnecessary declarations, and
scsireqq, cap_info and inquiry from struct asc_board.

Signed-off-by: Matthew Wilcox <matthew@wil.cx>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
-rw-r--r--	drivers/scsi/advansys.c	637
1 files changed, 211 insertions, 426 deletions
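For orientation, the reworked slave_configure() entry point dispatches on the board type and leaves the per-target setup to a narrow or wide helper. The sketch below is condensed from the advansys_slave_configure() hunk further down in this diff; the helper bodies are elided here, it is not a verbatim copy of the patch.

static int advansys_slave_configure(struct scsi_device *sdev)
{
	asc_board_t *boardp = ASC_BOARDP(sdev->host);

	boardp->flags |= ASC_SELECT_QUEUE_DEPTHS;

	/* Only remember the pointer for a LUN 0 device. */
	if (sdev->lun == 0)
		boardp->device[sdev->id] = sdev;

	/* Narrow and wide boards keep their settings in different structures. */
	if (ASC_NARROW_BOARD(boardp))
		advansys_narrow_slave_configure(sdev, &boardp->dvc_var.asc_dvc_var);
	else
		advansys_wide_slave_configure(sdev, &boardp->dvc_var.adv_dvc_var);

	return 0;
}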
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 277002a54601..5885ce4aad9a 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -944,10 +944,6 @@ typedef unsigned char uchar;
 #define ASC_MAX_CDB_LEN 12
 #define ASC_SCSI_RESET_HOLD_TIME_US 60
 
-#define ADV_INQ_CLOCKING_ST_ONLY 0x0
-#define ADV_INQ_CLOCKING_DT_ONLY 0x1
-#define ADV_INQ_CLOCKING_ST_AND_DT 0x3
-
 /*
  * Inquiry SPC-2 SPI Byte 1 EVPD (Enable Vital Product Data)
  * and CmdDt (Command Support Data) field bit definitions.
@@ -966,57 +962,8 @@ typedef unsigned char uchar;
 #define ASC_SRB_TID(x) ((uchar)((uchar)(x) & (uchar)0x0F))
 #define ASC_SRB_LUN(x) ((uchar)((uint)(x) >> 13))
 #define PUT_CDB1(x) ((uchar)((uint)(x) >> 8))
-#define MS_CMD_DONE 0x00
-#define MS_EXTEND 0x01
 #define MS_SDTR_LEN 0x03
-#define MS_SDTR_CODE 0x01
 #define MS_WDTR_LEN 0x02
-#define MS_WDTR_CODE 0x03
-#define MS_MDP_LEN 0x05
-#define MS_MDP_CODE 0x00
-
-/*
- * Inquiry data structure and bitfield macros
- *
- * Only quantities of more than 1 bit are shifted, since the others are
- * just tested for true or false. C bitfields aren't portable between big
- * and little-endian platforms so they are not used.
- */
-
-#define ASC_INQ_DVC_TYPE(inq) ((inq)->periph & 0x1f)
-#define ASC_INQ_QUALIFIER(inq) (((inq)->periph & 0xe0) >> 5)
-#define ASC_INQ_DVC_TYPE_MOD(inq) ((inq)->devtype & 0x7f)
-#define ASC_INQ_REMOVABLE(inq) ((inq)->devtype & 0x80)
-#define ASC_INQ_ANSI_VER(inq) ((inq)->ver & 0x07)
-#define ASC_INQ_ECMA_VER(inq) (((inq)->ver & 0x38) >> 3)
-#define ASC_INQ_ISO_VER(inq) (((inq)->ver & 0xc0) >> 6)
-#define ASC_INQ_RESPONSE_FMT(inq) ((inq)->byte3 & 0x0f)
-#define ASC_INQ_TERM_IO(inq) ((inq)->byte3 & 0x40)
-#define ASC_INQ_ASYNC_NOTIF(inq) ((inq)->byte3 & 0x80)
-#define ASC_INQ_SOFT_RESET(inq) ((inq)->flags & 0x01)
-#define ASC_INQ_CMD_QUEUE(inq) ((inq)->flags & 0x02)
-#define ASC_INQ_LINK_CMD(inq) ((inq)->flags & 0x08)
-#define ASC_INQ_SYNC(inq) ((inq)->flags & 0x10)
-#define ASC_INQ_WIDE16(inq) ((inq)->flags & 0x20)
-#define ASC_INQ_WIDE32(inq) ((inq)->flags & 0x40)
-#define ASC_INQ_REL_ADDR(inq) ((inq)->flags & 0x80)
-#define ASC_INQ_INFO_UNIT(inq) ((inq)->info & 0x01)
-#define ASC_INQ_QUICK_ARB(inq) ((inq)->info & 0x02)
-#define ASC_INQ_CLOCKING(inq) (((inq)->info & 0x0c) >> 2)
-
-typedef struct {
-	uchar periph;
-	uchar devtype;
-	uchar ver;
-	uchar byte3;
-	uchar add_len;
-	uchar res1;
-	uchar res2;
-	uchar flags;
-	uchar vendor_id[8];
-	uchar product_id[16];
-	uchar product_rev_level[4];
-} ASC_SCSI_INQUIRY;
 
 #define ASC_SG_LIST_PER_Q 7
 #define QS_FREE 0x00
@@ -1932,9 +1879,7 @@ static void DvcDelayNanoSecond(ASC_DVC_VAR *, ASC_DCNT);
 static void DvcPutScsiQ(PortAddr, ushort, uchar *, int);
 static void DvcGetQinfo(PortAddr, ushort, uchar *, int);
 static ushort AscInitAsc1000Driver(ASC_DVC_VAR *);
-static void AscAsyncFix(ASC_DVC_VAR *, uchar, ASC_SCSI_INQUIRY *);
-static int AscTagQueuingSafe(ASC_SCSI_INQUIRY *);
-static void AscInquiryHandling(ASC_DVC_VAR *, uchar, ASC_SCSI_INQUIRY *);
+static void AscAsyncFix(ASC_DVC_VAR *, struct scsi_device *);
 static int AscExeScsiQueue(ASC_DVC_VAR *, ASC_SCSI_Q *);
 static int AscISR(ASC_DVC_VAR *);
 static uint AscGetNumOfFreeQueue(ASC_DVC_VAR *, uchar, uchar);
@@ -3081,7 +3026,6 @@ static int AdvResetSB(ADV_DVC_VAR *asc_dvc);
  * Internal Adv Library functions.
  */
 static int AdvSendIdleCmd(ADV_DVC_VAR *, ushort, ADV_DCNT);
-static void AdvInquiryHandling(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
 static int AdvInitFrom3550EEP(ADV_DVC_VAR *);
 static int AdvInitFrom38C0800EEP(ADV_DVC_VAR *);
 static int AdvInitFrom38C1600EEP(ADV_DVC_VAR *);
@@ -3296,74 +3240,6 @@ static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config;
 	((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK))
 
 /*
- * Inquiry data structure and bitfield macros
- *
- * Using bitfields to access the subchar data isn't portable across
- * endianness, so instead mask and shift. Only quantities of more
- * than 1 bit are shifted, since the others are just tested for true
- * or false.
- */
-
-#define ADV_INQ_DVC_TYPE(inq) ((inq)->periph & 0x1f)
-#define ADV_INQ_QUALIFIER(inq) (((inq)->periph & 0xe0) >> 5)
-#define ADV_INQ_DVC_TYPE_MOD(inq) ((inq)->devtype & 0x7f)
-#define ADV_INQ_REMOVABLE(inq) ((inq)->devtype & 0x80)
-#define ADV_INQ_ANSI_VER(inq) ((inq)->ver & 0x07)
-#define ADV_INQ_ECMA_VER(inq) (((inq)->ver & 0x38) >> 3)
-#define ADV_INQ_ISO_VER(inq) (((inq)->ver & 0xc0) >> 6)
-#define ADV_INQ_RESPONSE_FMT(inq) ((inq)->byte3 & 0x0f)
-#define ADV_INQ_TERM_IO(inq) ((inq)->byte3 & 0x40)
-#define ADV_INQ_ASYNC_NOTIF(inq) ((inq)->byte3 & 0x80)
-#define ADV_INQ_SOFT_RESET(inq) ((inq)->flags & 0x01)
-#define ADV_INQ_CMD_QUEUE(inq) ((inq)->flags & 0x02)
-#define ADV_INQ_LINK_CMD(inq) ((inq)->flags & 0x08)
-#define ADV_INQ_SYNC(inq) ((inq)->flags & 0x10)
-#define ADV_INQ_WIDE16(inq) ((inq)->flags & 0x20)
-#define ADV_INQ_WIDE32(inq) ((inq)->flags & 0x40)
-#define ADV_INQ_REL_ADDR(inq) ((inq)->flags & 0x80)
-#define ADV_INQ_INFO_UNIT(inq) ((inq)->info & 0x01)
-#define ADV_INQ_QUICK_ARB(inq) ((inq)->info & 0x02)
-#define ADV_INQ_CLOCKING(inq) (((inq)->info & 0x0c) >> 2)
-
-typedef struct {
-	uchar periph;		/* peripheral device type [0:4] */
-				/* peripheral qualifier [5:7] */
-	uchar devtype;		/* device type modifier (for SCSI I) [0:6] */
-				/* RMB - removable medium bit [7] */
-	uchar ver;		/* ANSI approved version [0:2] */
-				/* ECMA version [3:5] */
-				/* ISO version [6:7] */
-	uchar byte3;		/* response data format [0:3] */
-				/* 0 SCSI 1 */
-				/* 1 CCS */
-				/* 2 SCSI-2 */
-				/* 3-F reserved */
-				/* reserved [4:5] */
-				/* terminate I/O process bit (see 5.6.22) [6] */
-				/* asynch. event notification (processor) [7] */
-	uchar add_len;		/* additional length */
-	uchar res1;		/* reserved */
-	uchar res2;		/* reserved */
-	uchar flags;		/* soft reset implemented [0] */
-				/* command queuing [1] */
-				/* reserved [2] */
-				/* linked command for this logical unit [3] */
-				/* synchronous data transfer [4] */
-				/* wide bus 16 bit data transfer [5] */
-				/* wide bus 32 bit data transfer [6] */
-				/* relative addressing mode [7] */
-	uchar vendor_id[8];	/* vendor identification */
-	uchar product_id[16];	/* product identification */
-	uchar product_rev_level[4];	/* product revision level */
-	uchar vendor_specific[20];	/* vendor specific */
-	uchar info;		/* information unit supported [0] */
-				/* quick arbitrate supported [1] */
-				/* clocking field [2:3] */
-				/* reserved [4:7] */
-	uchar res3;		/* reserved */
-} ADV_SCSI_INQUIRY;	/* 58 bytes */
-
-/*
  * --- Driver Constants and Macros
  */
 
@@ -3771,10 +3647,6 @@ typedef struct asc_board {
 	/*
 	 * The following fields are used only for Narrow Boards.
 	 */
-	/* The following three structures must be in DMA-able memory. */
-	ASC_SCSI_REQ_Q scsireqq;
-	ASC_CAP_INFO cap_info;
-	ASC_SCSI_INQUIRY inquiry;
 	uchar sdtr_data[ASC_MAX_TID + 1];	/* SDTR information */
 	/*
 	 * The following fields are used only for Wide Boards.
@@ -3809,8 +3681,6 @@ static int asc_dbglvl = 3;
 
 /*
  * --- Driver Function Prototypes
- *
- * advansys.h contains function prototypes for functions global to Linux.
  */
 
 static int advansys_slave_configure(struct scsi_device *);
@@ -4622,38 +4492,203 @@ static irqreturn_t advansys_interrupt(int irq, void *dev_id)
 	return result;
 }
 
+static void
+advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
+{
+	ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id;
+	ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng;
+
+	if (sdev->lun == 0) {
+		ASC_SCSI_BIT_ID_TYPE orig_init_sdtr = asc_dvc->init_sdtr;
+		if ((asc_dvc->cfg->sdtr_enable & tid_bit) && sdev->sdtr) {
+			asc_dvc->init_sdtr |= tid_bit;
+		} else {
+			asc_dvc->init_sdtr &= ~tid_bit;
+		}
+
+		if (orig_init_sdtr != asc_dvc->init_sdtr)
+			AscAsyncFix(asc_dvc, sdev);
+	}
+
+	if (sdev->tagged_supported) {
+		if (asc_dvc->cfg->cmd_qng_enabled & tid_bit) {
+			if (sdev->lun == 0) {
+				asc_dvc->cfg->can_tagged_qng |= tid_bit;
+				asc_dvc->use_tagged_qng |= tid_bit;
+			}
+			scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+						asc_dvc->max_dvc_qng[sdev->id]);
+		}
+	} else {
+		if (sdev->lun == 0) {
+			asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
+			asc_dvc->use_tagged_qng &= ~tid_bit;
+		}
+		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+	}
+
+	if ((sdev->lun == 0) &&
+	    (orig_use_tagged_qng != asc_dvc->use_tagged_qng)) {
+		AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B,
+				 asc_dvc->cfg->disc_enable);
+		AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B,
+				 asc_dvc->use_tagged_qng);
+		AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B,
+				 asc_dvc->cfg->can_tagged_qng);
+
+		asc_dvc->max_dvc_qng[sdev->id] =
+			asc_dvc->cfg->max_tag_qng[sdev->id];
+		AscWriteLramByte(asc_dvc->iop_base,
+				 (ushort)(ASCV_MAX_DVC_QNG_BEG + sdev->id),
+				 asc_dvc->max_dvc_qng[sdev->id]);
+	}
+}
+
 /*
- * Set the number of commands to queue per device for the
- * specified host adapter.
+ * Wide Transfers
+ *
+ * If the EEPROM enabled WDTR for the device and the device supports wide
+ * bus (16 bit) transfers, then turn on the device's 'wdtr_able' bit and
+ * write the new value to the microcode.
  */
-static int advansys_slave_configure(struct scsi_device *device)
+static void
+advansys_wide_enable_wdtr(AdvPortAddr iop_base, unsigned short tidmask)
 {
-	asc_board_t *boardp;
+	unsigned short cfg_word;
+	AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
+	if ((cfg_word & tidmask) != 0)
+		return;
+
+	cfg_word |= tidmask;
+	AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
 
-	boardp = ASC_BOARDP(device->host);
-	boardp->flags |= ASC_SELECT_QUEUE_DEPTHS;
 	/*
-	 * Save a pointer to the device and set its initial/maximum
-	 * queue depth. Only save the pointer for a lun0 dev though.
+	 * Clear the microcode SDTR and WDTR negotiation done indicators for
+	 * the target to cause it to negotiate with the new setting set above.
+	 * WDTR when accepted causes the target to enter asynchronous mode, so
+	 * SDTR must be negotiated.
 	 */
-	if (device->lun == 0)
-		boardp->device[device->id] = device;
-	if (device->tagged_supported) {
-		if (ASC_NARROW_BOARD(boardp)) {
-			scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
-				boardp->dvc_var.asc_dvc_var.
-				max_dvc_qng[device->id]);
-		} else {
-			scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
-				boardp->dvc_var.adv_dvc_var.
-				max_dvc_qng);
+	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+	cfg_word &= ~tidmask;
+	AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+	AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
+	cfg_word &= ~tidmask;
+	AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
+}
+
+/*
+ * Synchronous Transfers
+ *
+ * If the EEPROM enabled SDTR for the device and the device
+ * supports synchronous transfers, then turn on the device's
+ * 'sdtr_able' bit. Write the new value to the microcode.
+ */
+static void
+advansys_wide_enable_sdtr(AdvPortAddr iop_base, unsigned short tidmask)
+{
+	unsigned short cfg_word;
+	AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
+	if ((cfg_word & tidmask) != 0)
+		return;
+
+	cfg_word |= tidmask;
+	AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
+
+	/*
+	 * Clear the microcode "SDTR negotiation" done indicator for the
+	 * target to cause it to negotiate with the new setting set above.
+	 */
+	AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+	cfg_word &= ~tidmask;
+	AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+}
+
+/*
+ * PPR (Parallel Protocol Request) Capable
+ *
+ * If the device supports DT mode, then it must be PPR capable.
+ * The PPR message will be used in place of the SDTR and WDTR
+ * messages to negotiate synchronous speed and offset, transfer
+ * width, and protocol options.
+ */
+static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc,
+				     AdvPortAddr iop_base, unsigned short tidmask)
+{
+	AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
+	adv_dvc->ppr_able |= tidmask;
+	AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE, adv_dvc->ppr_able);
+}
+
+static void
+advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
+{
+	AdvPortAddr iop_base = adv_dvc->iop_base;
+	unsigned short tidmask = 1 << sdev->id;
+
+	if (sdev->lun == 0) {
+		/*
+		 * Handle WDTR, SDTR, and Tag Queuing. If the feature
+		 * is enabled in the EEPROM and the device supports the
+		 * feature, then enable it in the microcode.
+		 */
+
+		if ((adv_dvc->wdtr_able & tidmask) && sdev->wdtr)
+			advansys_wide_enable_wdtr(iop_base, tidmask);
+		if ((adv_dvc->sdtr_able & tidmask) && sdev->sdtr)
+			advansys_wide_enable_sdtr(iop_base, tidmask);
+		if (adv_dvc->chip_type == ADV_CHIP_ASC38C1600 && sdev->ppr)
+			advansys_wide_enable_ppr(adv_dvc, iop_base, tidmask);
+
+		/*
+		 * Tag Queuing is disabled for the BIOS which runs in polled
+		 * mode and would see no benefit from Tag Queuing. Also by
+		 * disabling Tag Queuing in the BIOS devices with Tag Queuing
+		 * bugs will at least work with the BIOS.
+		 */
+		if ((adv_dvc->tagqng_able & tidmask) &&
+		    sdev->tagged_supported) {
+			unsigned short cfg_word;
+			AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
+			cfg_word |= tidmask;
+			AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
+					 cfg_word);
+			AdvWriteByteLram(iop_base,
+					 ASC_MC_NUMBER_OF_MAX_CMD + sdev->id,
+					 adv_dvc->max_dvc_qng);
 		}
+	}
+
+	if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) {
+		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+					adv_dvc->max_dvc_qng);
 	} else {
-		scsi_adjust_queue_depth(device, 0, device->host->cmd_per_lun);
+		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
 	}
-	ASC_DBG4(1,
-		 "advansys_slave_configure: device 0x%lx, boardp 0x%lx, id %d, depth %d\n",
-		 (ulong)device, (ulong)boardp, device->id, device->queue_depth);
+}
+
+/*
+ * Set the number of commands to queue per device for the
+ * specified host adapter.
+ */
+static int advansys_slave_configure(struct scsi_device *sdev)
+{
+	asc_board_t *boardp = ASC_BOARDP(sdev->host);
+	boardp->flags |= ASC_SELECT_QUEUE_DEPTHS;
+
+	/*
+	 * Save a pointer to the sdev and set its initial/maximum
+	 * queue depth. Only save the pointer for a lun0 dev though.
+	 */
+	if (sdev->lun == 0)
+		boardp->device[sdev->id] = sdev;
+
+	if (ASC_NARROW_BOARD(boardp))
+		advansys_narrow_slave_configure(sdev,
+						&boardp->dvc_var.asc_dvc_var);
+	else
+		advansys_wide_slave_configure(sdev,
+					      &boardp->dvc_var.adv_dvc_var);
+
 	return 0;
 }
 
@@ -5406,21 +5441,10 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
 		scp->result = 0;
 
 		/*
-		 * If an INQUIRY command completed successfully, then call
-		 * the AscInquiryHandling() function to set-up the device.
-		 */
-		if (scp->cmnd[0] == INQUIRY && scp->device->lun == 0 &&
-		    (scp->request_bufflen - qdonep->remain_bytes) >= 8) {
-			AscInquiryHandling(asc_dvc_varp, scp->device->id & 0x7,
-					   (ASC_SCSI_INQUIRY *)scp->
-					   request_buffer);
-		}
-
-		/*
 		 * Check for an underrun condition.
 		 *
 		 * If there was no error and an underrun condition, then
-		 * then return the number of underrun bytes.
+		 * return the number of underrun bytes.
 		 */
 		if (scp->request_bufflen != 0 && qdonep->remain_bytes != 0 &&
 		    qdonep->remain_bytes <= scp->request_bufflen) {
@@ -8229,8 +8253,8 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
 					      (uchar *)&ext_msg,
 					      sizeof(EXT_MSG) >> 1);
 
-			if (ext_msg.msg_type == MS_EXTEND &&
-			    ext_msg.msg_req == MS_SDTR_CODE &&
+			if (ext_msg.msg_type == EXTENDED_MESSAGE &&
+			    ext_msg.msg_req == EXTENDED_SDTR &&
 			    ext_msg.msg_len == MS_SDTR_LEN) {
 				sdtr_accept = TRUE;
 				if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) {
@@ -8312,8 +8336,8 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
 						q_cntl);
 			AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
 			return (0);
-		} else if (ext_msg.msg_type == MS_EXTEND &&
-			   ext_msg.msg_req == MS_WDTR_CODE &&
+		} else if (ext_msg.msg_type == EXTENDED_MESSAGE &&
+			   ext_msg.msg_req == EXTENDED_WDTR &&
 			   ext_msg.msg_len == MS_WDTR_LEN) {
 
 			ext_msg.wdtr_width = 0;
@@ -8406,9 +8430,9 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
 					      (uchar *)&out_msg,
 					      sizeof(EXT_MSG) >> 1);
 
-			if ((out_msg.msg_type == MS_EXTEND) &&
+			if ((out_msg.msg_type == EXTENDED_MESSAGE) &&
 			    (out_msg.msg_len == MS_SDTR_LEN) &&
-			    (out_msg.msg_req == MS_SDTR_CODE)) {
+			    (out_msg.msg_req == EXTENDED_SDTR)) {
 
 				asc_dvc->init_sdtr &= ~target_id;
 				asc_dvc->sdtr_done &= ~target_id;
@@ -9901,9 +9925,9 @@ AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset)
 	PortAddr iop_base;
 
 	iop_base = asc_dvc->iop_base;
-	sdtr_buf.msg_type = MS_EXTEND;
+	sdtr_buf.msg_type = EXTENDED_MESSAGE;
 	sdtr_buf.msg_len = MS_SDTR_LEN;
-	sdtr_buf.msg_req = MS_SDTR_CODE;
+	sdtr_buf.msg_req = EXTENDED_SDTR;
 	sdtr_buf.xfer_period = sdtr_period;
 	sdtr_offset &= ASC_SYN_MAX_OFFSET;
 	sdtr_buf.req_ack_offset = sdtr_offset;
@@ -10985,91 +11009,31 @@ AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
 	return (n_error);
 }
 
-static void
-AscAsyncFix(ASC_DVC_VAR *asc_dvc, uchar tid_no, ASC_SCSI_INQUIRY *inq)
+static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev)
 {
-	uchar dvc_type;
-	ASC_SCSI_BIT_ID_TYPE tid_bits;
-
-	dvc_type = ASC_INQ_DVC_TYPE(inq);
-	tid_bits = ASC_TIX_TO_TARGET_ID(tid_no);
+	char type = sdev->type;
+	ASC_SCSI_BIT_ID_TYPE tid_bits = 1 << sdev->id;
 
 	if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN) {
 		if (!(asc_dvc->init_sdtr & tid_bits)) {
-			if ((dvc_type == TYPE_ROM) &&
-			    (strncmp(inq->vendor_id, "HP ", 3) == 0)) {
+			if ((type == TYPE_ROM) &&
+			    (strncmp(sdev->vendor, "HP ", 3) == 0)) {
 				asc_dvc->pci_fix_asyn_xfer_always |= tid_bits;
 			}
 			asc_dvc->pci_fix_asyn_xfer |= tid_bits;
-			if ((dvc_type == TYPE_PROCESSOR) ||
-			    (dvc_type == TYPE_SCANNER) ||
-			    (dvc_type == TYPE_ROM) || (dvc_type == TYPE_TAPE)) {
+			if ((type == TYPE_PROCESSOR) ||
+			    (type == TYPE_SCANNER) || (type == TYPE_ROM) ||
+			    (type == TYPE_TAPE)) {
 				asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
 			}
 
 			if (asc_dvc->pci_fix_asyn_xfer & tid_bits) {
 				AscSetRunChipSynRegAtID(asc_dvc->iop_base,
-							tid_no,
+							sdev->id,
 							ASYN_SDTR_DATA_FIX_PCI_REV_AB);
 			}
 		}
 	}
-	return;
-}
-
-static int AscTagQueuingSafe(ASC_SCSI_INQUIRY *inq)
-{
-	if ((inq->add_len >= 32) &&
-	    (strncmp(inq->vendor_id, "QUANTUM XP34301", 15) == 0) &&
-	    (strncmp(inq->product_rev_level, "1071", 4) == 0)) {
-		return 0;
-	}
-	return 1;
-}
-
-static void
-AscInquiryHandling(ASC_DVC_VAR *asc_dvc, uchar tid_no, ASC_SCSI_INQUIRY *inq)
-{
-	ASC_SCSI_BIT_ID_TYPE tid_bit = ASC_TIX_TO_TARGET_ID(tid_no);
-	ASC_SCSI_BIT_ID_TYPE orig_init_sdtr, orig_use_tagged_qng;
-
-	orig_init_sdtr = asc_dvc->init_sdtr;
-	orig_use_tagged_qng = asc_dvc->use_tagged_qng;
-
-	asc_dvc->init_sdtr &= ~tid_bit;
-	asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
-	asc_dvc->use_tagged_qng &= ~tid_bit;
-
-	if (ASC_INQ_RESPONSE_FMT(inq) >= 2 || ASC_INQ_ANSI_VER(inq) >= 2) {
-		if ((asc_dvc->cfg->sdtr_enable & tid_bit) && ASC_INQ_SYNC(inq)) {
-			asc_dvc->init_sdtr |= tid_bit;
-		}
-		if ((asc_dvc->cfg->cmd_qng_enabled & tid_bit) &&
-		    ASC_INQ_CMD_QUEUE(inq)) {
-			if (AscTagQueuingSafe(inq)) {
-				asc_dvc->use_tagged_qng |= tid_bit;
-				asc_dvc->cfg->can_tagged_qng |= tid_bit;
-			}
-		}
-	}
-	if (orig_use_tagged_qng != asc_dvc->use_tagged_qng) {
-		AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B,
-				 asc_dvc->cfg->disc_enable);
-		AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B,
-				 asc_dvc->use_tagged_qng);
-		AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B,
-				 asc_dvc->cfg->can_tagged_qng);
-
-		asc_dvc->max_dvc_qng[tid_no] =
-		    asc_dvc->cfg->max_tag_qng[tid_no];
-		AscWriteLramByte(asc_dvc->iop_base,
-				 (ushort)(ASCV_MAX_DVC_QNG_BEG + tid_no),
-				 asc_dvc->max_dvc_qng[tid_no]);
-	}
-	if (orig_init_sdtr != asc_dvc->init_sdtr) {
-		AscAsyncFix(asc_dvc, tid_no, inq);
-	}
-	return;
 }
 
 static uchar AscReadLramByte(PortAddr iop_base, ushort addr)
@@ -13998,7 +13962,7 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
 
 	/*
 	 * Microcode operating variables for WDTR, SDTR, and command tag
-	 * queuing will be set in AdvInquiryHandling() based on what a
+	 * queuing will be set in slave_configure() based on what a
 	 * device reports it is capable of in Inquiry byte 7.
 	 *
 	 * If SCSI Bus Resets have been disabled, then directly set
@@ -14649,7 +14613,7 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
 
 	/*
 	 * Microcode operating variables for WDTR, SDTR, and command tag
-	 * queuing will be set in AdvInquiryHandling() based on what a
+	 * queuing will be set in slave_configure() based on what a
 	 * device reports it is capable of in Inquiry byte 7.
 	 *
 	 * If SCSI Bus Resets have been disabled, then directly set
@@ -15269,7 +15233,7 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
 
 	/*
 	 * Microcode operating variables for WDTR, SDTR, and command tag
-	 * queuing will be set in AdvInquiryHandling() based on what a
+	 * queuing will be set in slave_configure() based on what a
	 * device reports it is capable of in Inquiry byte 7.
 	 *
 	 * If SCSI Bus Resets have been disabled, then directly set
@@ -16953,23 +16917,6 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc)
 			scsiq->cntl = 0;
 
 			/*
-			 * If the command that completed was a SCSI INQUIRY and
-			 * LUN 0 was sent the command, then process the INQUIRY
-			 * command information for the device.
-			 *
-			 * Note: If data returned were either VPD or CmdDt data,
-			 * don't process the INQUIRY command information for
-			 * the device, otherwise may erroneously set *_able bits.
-			 */
-			if (scsiq->done_status == QD_NO_ERROR &&
-			    scsiq->cdb[0] == INQUIRY &&
-			    scsiq->target_lun == 0 &&
-			    (scsiq->cdb[1] & ADV_INQ_RTN_VPD_AND_CMDDT)
-			    == ADV_INQ_RTN_STD_INQUIRY_DATA) {
-				AdvInquiryHandling(asc_dvc, scsiq);
-			}
-
-			/*
 			 * Notify the driver of the completed request by passing
 			 * the ADV_SCSI_REQ_Q pointer to its callback function.
 			 */
@@ -17074,168 +17021,6 @@ AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
 	return ADV_ERROR;
 }
 
-/*
- * Inquiry Information Byte 7 Handling
- *
- * Handle SCSI Inquiry Command information for a device by setting
- * microcode operating variables that affect WDTR, SDTR, and Tag
- * Queuing.
- */
-static void AdvInquiryHandling(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq)
-{
-	AdvPortAddr iop_base;
-	uchar tid;
-	ADV_SCSI_INQUIRY *inq;
-	ushort tidmask;
-	ushort cfg_word;
-
-	/*
-	 * AdvInquiryHandling() requires up to INQUIRY information Byte 7
-	 * to be available.
-	 *
-	 * If less than 8 bytes of INQUIRY information were requested or less
-	 * than 8 bytes were transferred, then return. cdb[4] is the request
-	 * length and the ADV_SCSI_REQ_Q 'data_cnt' field is set by the
-	 * microcode to the transfer residual count.
-	 */
-
-	if (scsiq->cdb[4] < 8 ||
-	    (scsiq->cdb[4] - le32_to_cpu(scsiq->data_cnt)) < 8) {
-		return;
-	}
-
-	iop_base = asc_dvc->iop_base;
-	tid = scsiq->target_id;
-
-	inq = (ADV_SCSI_INQUIRY *) scsiq->vdata_addr;
-
-	/*
-	 * WDTR, SDTR, and Tag Queuing cannot be enabled for old devices.
-	 */
-	if (ADV_INQ_RESPONSE_FMT(inq) < 2 && ADV_INQ_ANSI_VER(inq) < 2) {
-		return;
-	} else {
-		/*
-		 * INQUIRY Byte 7 Handling
-		 *
-		 * Use a device's INQUIRY byte 7 to determine whether it
-		 * supports WDTR, SDTR, and Tag Queuing. If the feature
-		 * is enabled in the EEPROM and the device supports the
-		 * feature, then enable it in the microcode.
-		 */
-
-		tidmask = ADV_TID_TO_TIDMASK(tid);
-
-		/*
-		 * Wide Transfers
-		 *
-		 * If the EEPROM enabled WDTR for the device and the device
-		 * supports wide bus (16 bit) transfers, then turn on the
-		 * device's 'wdtr_able' bit and write the new value to the
-		 * microcode.
-		 */
-		if ((asc_dvc->wdtr_able & tidmask) && ADV_INQ_WIDE16(inq)) {
-			AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
-			if ((cfg_word & tidmask) == 0) {
-				cfg_word |= tidmask;
-				AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE,
-						 cfg_word);
-
-				/*
-				 * Clear the microcode "SDTR negotiation" and "WDTR
-				 * negotiation" done indicators for the target to cause
-				 * it to negotiate with the new setting set above.
-				 * WDTR when accepted causes the target to enter
-				 * asynchronous mode, so SDTR must be negotiated.
-				 */
-				AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE,
-						cfg_word);
-				cfg_word &= ~tidmask;
-				AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE,
-						 cfg_word);
-				AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE,
-						cfg_word);
-				cfg_word &= ~tidmask;
-				AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE,
-						 cfg_word);
-			}
-		}
-
-		/*
-		 * Synchronous Transfers
-		 *
-		 * If the EEPROM enabled SDTR for the device and the device
-		 * supports synchronous transfers, then turn on the device's
-		 * 'sdtr_able' bit. Write the new value to the microcode.
-		 */
-		if ((asc_dvc->sdtr_able & tidmask) && ADV_INQ_SYNC(inq)) {
-			AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
-			if ((cfg_word & tidmask) == 0) {
-				cfg_word |= tidmask;
-				AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE,
-						 cfg_word);
-
-				/*
-				 * Clear the microcode "SDTR negotiation" done indicator
-				 * for the target to cause it to negotiate with the new
-				 * setting set above.
-				 */
-				AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE,
-						cfg_word);
-				cfg_word &= ~tidmask;
-				AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE,
-						 cfg_word);
-			}
-		}
-		/*
-		 * If the Inquiry data included enough space for the SPI-3
-		 * Clocking field, then check if DT mode is supported.
-		 */
-		if (asc_dvc->chip_type == ADV_CHIP_ASC38C1600 &&
-		    (scsiq->cdb[4] >= 57 ||
-		     (scsiq->cdb[4] - le32_to_cpu(scsiq->data_cnt)) >= 57)) {
-			/*
-			 * PPR (Parallel Protocol Request) Capable
-			 *
-			 * If the device supports DT mode, then it must be PPR capable.
-			 * The PPR message will be used in place of the SDTR and WDTR
-			 * messages to negotiate synchronous speed and offset, transfer
-			 * width, and protocol options.
-			 */
-			if (ADV_INQ_CLOCKING(inq) & ADV_INQ_CLOCKING_DT_ONLY) {
-				AdvReadWordLram(iop_base, ASC_MC_PPR_ABLE,
-						asc_dvc->ppr_able);
-				asc_dvc->ppr_able |= tidmask;
-				AdvWriteWordLram(iop_base, ASC_MC_PPR_ABLE,
-						 asc_dvc->ppr_able);
-			}
-		}
-
-		/*
-		 * If the EEPROM enabled Tag Queuing for the device and the
-		 * device supports Tag Queueing, then turn on the device's
-		 * 'tagqng_enable' bit in the microcode and set the microcode
-		 * maximum command count to the ADV_DVC_VAR 'max_dvc_qng'
-		 * value.
-		 *
-		 * Tag Queuing is disabled for the BIOS which runs in polled
-		 * mode and would see no benefit from Tag Queuing. Also by
-		 * disabling Tag Queuing in the BIOS devices with Tag Queuing
-		 * bugs will at least work with the BIOS.
-		 */
-		if ((asc_dvc->tagqng_able & tidmask) && ADV_INQ_CMD_QUEUE(inq)) {
-			AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
-			cfg_word |= tidmask;
-			AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE,
-					 cfg_word);
-
-			AdvWriteByteLram(iop_base,
-					 ASC_MC_NUMBER_OF_MAX_CMD + tid,
-					 asc_dvc->max_dvc_qng);
-		}
-	}
-}
-
 static int __devinit
 advansys_wide_init_chip(asc_board_t *boardp, ADV_DVC_VAR *adv_dvc_varp)
 {