path: root/drivers/message
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/message
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/message')
-rw-r--r--  drivers/message/Makefile | 6
-rw-r--r--  drivers/message/fusion/Kconfig | 66
-rw-r--r--  drivers/message/fusion/Makefile | 52
-rw-r--r--  drivers/message/fusion/linux_compat.h | 18
-rw-r--r--  drivers/message/fusion/lsi/fc_log.h | 89
-rw-r--r--  drivers/message/fusion/lsi/mpi.h | 746
-rw-r--r--  drivers/message/fusion/lsi/mpi_cnfg.h | 2105
-rw-r--r--  drivers/message/fusion/lsi/mpi_fc.h | 363
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 276
-rw-r--r--  drivers/message/fusion/lsi/mpi_inb.h | 220
-rw-r--r--  drivers/message/fusion/lsi/mpi_init.h | 362
-rw-r--r--  drivers/message/fusion/lsi/mpi_ioc.h | 770
-rw-r--r--  drivers/message/fusion/lsi/mpi_lan.h | 212
-rw-r--r--  drivers/message/fusion/lsi/mpi_raid.h | 232
-rw-r--r--  drivers/message/fusion/lsi/mpi_sas.h | 181
-rw-r--r--  drivers/message/fusion/lsi/mpi_targ.h | 435
-rw-r--r--  drivers/message/fusion/lsi/mpi_tool.h | 305
-rw-r--r--  drivers/message/fusion/lsi/mpi_type.h | 86
-rw-r--r--  drivers/message/fusion/mptbase.c | 5946
-rw-r--r--  drivers/message/fusion/mptbase.h | 1021
-rw-r--r--  drivers/message/fusion/mptctl.c | 2878
-rw-r--r--  drivers/message/fusion/mptctl.h | 484
-rw-r--r--  drivers/message/fusion/mptlan.c | 1688
-rw-r--r--  drivers/message/fusion/mptlan.h | 85
-rw-r--r--  drivers/message/fusion/mptscsih.c | 6021
-rw-r--r--  drivers/message/fusion/mptscsih.h | 94
-rw-r--r--  drivers/message/i2o/Kconfig | 75
-rw-r--r--  drivers/message/i2o/Makefile | 13
-rw-r--r--  drivers/message/i2o/README | 98
-rw-r--r--  drivers/message/i2o/README.ioctl | 394
-rw-r--r--  drivers/message/i2o/debug.c | 481
-rw-r--r--  drivers/message/i2o/device.c | 634
-rw-r--r--  drivers/message/i2o/driver.c | 374
-rw-r--r--  drivers/message/i2o/exec-osm.c | 507
-rw-r--r--  drivers/message/i2o/i2o_block.c | 1247
-rw-r--r--  drivers/message/i2o/i2o_block.h | 99
-rw-r--r--  drivers/message/i2o/i2o_config.c | 1160
-rw-r--r--  drivers/message/i2o/i2o_lan.h | 159
-rw-r--r--  drivers/message/i2o/i2o_proc.c | 2112
-rw-r--r--  drivers/message/i2o/i2o_scsi.c | 830
-rw-r--r--  drivers/message/i2o/iop.c | 1327
-rw-r--r--  drivers/message/i2o/pci.c | 528
42 files changed, 34779 insertions, 0 deletions
diff --git a/drivers/message/Makefile b/drivers/message/Makefile
new file mode 100644
index 000000000000..97ef5a01ad11
--- /dev/null
+++ b/drivers/message/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for MPT based block devices
3#
4
5obj-$(CONFIG_I2O) += i2o/
6obj-$(CONFIG_FUSION) += fusion/
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
new file mode 100644
index 000000000000..452418b24d7b
--- /dev/null
+++ b/drivers/message/fusion/Kconfig
@@ -0,0 +1,66 @@
1
2menu "Fusion MPT device support"
3
4config FUSION
5 tristate "Fusion MPT (base + ScsiHost) drivers"
6 depends on PCI && SCSI
7 ---help---
8 LSI Logic Fusion(TM) Message Passing Technology (MPT) device support
9 provides high performance SCSI host initiator, and LAN [1] interface
10 services to a host system. The Fusion architecture is capable of
11 duplexing these protocols on high-speed Fibre Channel
12 (up to 2 GHz x 2 ports = 4 GHz) and parallel SCSI (up to Ultra-320)
13 physical medium.
14
15 [1] LAN is not supported on parallel SCSI medium.
16
17config FUSION_MAX_SGE
18 int "Maximum number of scatter gather entries"
19 depends on FUSION
20 default "40"
21 help
22 This option allows you to specify the maximum number of scatter-
23 gather entries per I/O. The driver defaults to 40, a reasonable number
24 for most systems. However, the user may increase this up to 128.
25 Increasing this parameter will require significantly more memory
26 on a per controller instance. Increasing the parameter is not
27 necessary (or recommended) unless the user will be running
28 large I/O's via the raw interface.
29
30config FUSION_CTL
31 tristate "Fusion MPT misc device (ioctl) driver"
32 depends on FUSION
33 ---help---
34 The Fusion MPT misc device driver provides specialized control
35 of MPT adapters via system ioctl calls. Use of ioctl calls to
36 the MPT driver requires that you create and use a misc device
37 node ala:
38 mknod /dev/mptctl c 10 240
39
40 One use of this ioctl interface is to perform an upgrade (reflash)
41 of the MPT adapter firmware. Refer to readme file(s) distributed
42 with the Fusion MPT linux driver for additional details.
43
44 If enabled by saying M to this, a driver named: mptctl
45 will be compiled.
46
47 If unsure whether you really want or need this, say N.
48
49config FUSION_LAN
50 tristate "Fusion MPT LAN driver"
51 depends on FUSION && NET_FC
52 ---help---
53 This module supports LAN IP traffic over Fibre Channel port(s)
54 on Fusion MPT compatible hardware (LSIFC9xx chips).
55 The physical interface used is defined in RFC 2625.
56 Please refer to that document for details.
57
58 Installing this driver requires the knowledge to configure and
59 activate a new network interface, "fc0", using standard Linux tools.
60
61 If enabled by saying M to this, a driver named: mptlan
62 will be compiled.
63
64 If unsure whether you really want or need this, say N.
65
66endmenu
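
The FUSION_MAX_SGE help text above describes a per-I/O scatter-gather limit that defaults to 40 and may be raised to at most 128. A minimal sketch of how a driver could honor those bounds at load time; the helper name is hypothetical, and the real mptbase.c logic is not part of this hunk:

/* Sketch only: clamp a requested SG entry count to the 40..128 range
 * documented in the FUSION_MAX_SGE help text.  CONFIG_FUSION_MAX_SGE is
 * the macro that Kconfig generates for the option above.
 */
#define MPT_SG_DEFAULT  CONFIG_FUSION_MAX_SGE
#define MPT_SG_LIMIT    128

static int mpt_clamp_sg_entries(int requested)  /* hypothetical helper */
{
        if (requested <= 0)
                return MPT_SG_DEFAULT;
        if (requested > MPT_SG_LIMIT)
                return MPT_SG_LIMIT;
        return requested;
}
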
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
new file mode 100644
index 000000000000..f6fdcaaefc89
--- /dev/null
+++ b/drivers/message/fusion/Makefile
@@ -0,0 +1,52 @@
1#
2# Makefile for the LSI Logic Fusion MPT (Message Passing Technology) drivers.
3#
4# Note! If you want to turn on various debug defines for an extended period of
5# time but don't want them lingering around in the Makefile when you pass it on
6# to someone else, use the MPT_CFLAGS env variable (thanks Steve). -nromer
7
8#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-{ LSI_LOGIC
9
10# Architecture-specific...
11# # intel
12#EXTRA_CFLAGS += -g
13# # sparc64
14#EXTRA_CFLAGS += -gstabs+
15
16EXTRA_CFLAGS += ${MPT_CFLAGS}
17
18# Fusion MPT drivers; recognized debug defines...
19# MPT general:
20#EXTRA_CFLAGS += -DMPT_DEBUG_SCSI
21#EXTRA_CFLAGS += -DMPT_DEBUG
22#EXTRA_CFLAGS += -DMPT_DEBUG_MSG_FRAME
23#EXTRA_CFLAGS += -DMPT_DEBUG_SG
24
25#
26# driver/module specifics...
27#
28# For mptbase:
29#CFLAGS_mptbase.o += -DMPT_DEBUG_HANDSHAKE
30#CFLAGS_mptbase.o += -DMPT_DEBUG_IRQ
31#
32# For mptscsih:
33#CFLAGS_mptscsih.o += -DMPT_DEBUG_SCANDV
34#CFLAGS_mptscsih.o += -DMPT_DEBUG_RESET
35#CFLAGS_mptscsih.o += -DMPT_DEBUG_NEH
36#
37# For mptctl:
38#CFLAGS_mptctl.o += -DMPT_DEBUG_IOCTL
39#
40# For mptlan:
41#CFLAGS_mptlan.o += -DMPT_LAN_IO_DEBUG
42#
43# For isense:
44
45# EXP...
46##mptscsih-objs := scsihost.o scsiherr.o
47
48#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-} LSI_LOGIC
49
50obj-$(CONFIG_FUSION) += mptbase.o mptscsih.o
51obj-$(CONFIG_FUSION_CTL) += mptctl.o
52obj-$(CONFIG_FUSION_LAN) += mptlan.o
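
The comments above explain that extra debug output is switched on by passing defines such as -DMPT_DEBUG through the MPT_CFLAGS environment variable instead of editing the Makefile. A hedged sketch of what such a define typically gates on the C side; the dprintk macro here is illustrative and not copied from the driver sources in this diff:

#include <linux/kernel.h>

/* Illustrative debug gate: the body is compiled in only when the build
 * passes -DMPT_DEBUG (for example via MPT_CFLAGS as described above).
 */
#ifdef MPT_DEBUG
#define dprintk(fmt, args...)   printk(KERN_DEBUG "mpt: " fmt, ##args)
#else
#define dprintk(fmt, args...)   do { } while (0)
#endif
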
diff --git a/drivers/message/fusion/linux_compat.h b/drivers/message/fusion/linux_compat.h
new file mode 100644
index 000000000000..048b5b8610e3
--- /dev/null
+++ b/drivers/message/fusion/linux_compat.h
@@ -0,0 +1,18 @@
1/* drivers/message/fusion/linux_compat.h */
2
3#ifndef FUSION_LINUX_COMPAT_H
4#define FUSION_LINUX_COMPAT_H
5
6#include <linux/version.h>
7#include <scsi/scsi_device.h>
8
9#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6))
10static int inline scsi_device_online(struct scsi_device *sdev)
11{
12 return sdev->online;
13}
14#endif
15
16
17/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
18#endif /* _LINUX_COMPAT_H */
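
linux_compat.h back-fills scsi_device_online() for kernels older than 2.6.6, where struct scsi_device still carried an 'online' flag. A small illustration of the call-site pattern this keeps uniform across kernel versions; the surrounding function is hypothetical:

#include <scsi/scsi_device.h>
#include "linux_compat.h"  /* supplies scsi_device_online() on pre-2.6.6 kernels */

/* Hypothetical caller: refuse work for devices the midlayer marked offline. */
static int mpt_device_is_usable(struct scsi_device *sdev)
{
        return scsi_device_online(sdev);
}
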
diff --git a/drivers/message/fusion/lsi/fc_log.h b/drivers/message/fusion/lsi/fc_log.h
new file mode 100644
index 000000000000..dc98d46f9071
--- /dev/null
+++ b/drivers/message/fusion/lsi/fc_log.h
@@ -0,0 +1,89 @@
1/*
2 * Copyright (c) 2000-2001 LSI Logic Corporation. All rights reserved.
3 *
4 * NAME: fc_log.h
5 * SUMMARY: MPI IocLogInfo definitions for the SYMFC9xx chips
6 * DESCRIPTION: Contains the enumerated list of values that may be returned
7 * in the IOCLogInfo field of a MPI Default Reply Message.
8 *
9 * CREATION DATE: 6/02/2000
10 * ID: $Id: fc_log.h,v 4.6 2001/07/26 14:41:33 sschremm Exp $
11 */
12
13
14/*
15 * MpiIocLogInfo_t enum
16 *
17 * These 32 bit values are used in the IOCLogInfo field of the MPI reply
18 * messages.
19 * The value is 0xabcccccc where
20 * a = The type of log info as per the MPI spec. Since these codes are
21 * all for Fibre Channel this value will always be 2.
22 * b = Specifies a subclass of the firmware where
23 * 0 = FCP Initiator
24 * 1 = FCP Target
25 * 2 = LAN
26 * 3 = MPI Message Layer
27 * 4 = FC Link
28 * 5 = Context Manager
29 * 6 = Invalid Field Offset
30 * 7 = State Change Info
31 * all others are reserved for future use
32 * c = A specific value within the subclass.
33 *
34 * NOTE: Any new values should be added to the end of each subclass so that the
35 * codes remain consistent across firmware releases.
36 */
37typedef enum _MpiIocLogInfoFc
38{
39 MPI_IOCLOGINFO_FC_INIT_BASE = 0x20000000,
40 MPI_IOCLOGINFO_FC_INIT_ERROR_OUT_OF_ORDER_FRAME = 0x20000001, /* received an out of order frame - unsupported */
41 MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_START_OF_FRAME = 0x20000002, /* Bad Rx Frame, bad start of frame primitive */
42 MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_END_OF_FRAME = 0x20000003, /* Bad Rx Frame, bad end of frame primitive */
43 MPI_IOCLOGINFO_FC_INIT_ERROR_OVER_RUN = 0x20000004, /* Bad Rx Frame, overrun */
44 MPI_IOCLOGINFO_FC_INIT_ERROR_RX_OTHER = 0x20000005, /* Other errors caught by IOC which require retries */
45 MPI_IOCLOGINFO_FC_INIT_ERROR_SUBPROC_DEAD = 0x20000006, /* Main processor could not initialize sub-processor */
46 MPI_IOCLOGINFO_FC_INIT_ERROR_RX_OVERRUN = 0x20000007, /* Scatter Gather overrun */
47 MPI_IOCLOGINFO_FC_INIT_ERROR_RX_BAD_STATUS = 0x20000008, /* Receiver detected context mismatch via invalid header */
48 MPI_IOCLOGINFO_FC_INIT_ERROR_RX_UNEXPECTED_FRAME= 0x20000009, /* CtxMgr detected unsupported frame type */
49 MPI_IOCLOGINFO_FC_INIT_ERROR_LINK_FAILURE = 0x2000000A, /* Link failure occurred */
50 MPI_IOCLOGINFO_FC_INIT_ERROR_TX_TIMEOUT = 0x2000000B, /* Transmitter timeout error */
51
52 MPI_IOCLOGINFO_FC_TARGET_BASE = 0x21000000,
53 MPI_IOCLOGINFO_FC_TARGET_NO_PDISC = 0x21000001, /* not sent because we are waiting for a PDISC from the initiator */
54 MPI_IOCLOGINFO_FC_TARGET_NO_LOGIN = 0x21000002, /* not sent because we are not logged in to the remote node */
55 MPI_IOCLOGINFO_FC_TARGET_DOAR_KILLED_BY_LIP = 0x21000003, /* Data Out, Auto Response, not sent due to a LIP */
56 MPI_IOCLOGINFO_FC_TARGET_DIAR_KILLED_BY_LIP = 0x21000004, /* Data In, Auto Response, not sent due to a LIP */
57 MPI_IOCLOGINFO_FC_TARGET_DIAR_MISSING_DATA = 0x21000005, /* Data In, Auto Response, missing data frames */
58 MPI_IOCLOGINFO_FC_TARGET_DONR_KILLED_BY_LIP = 0x21000006, /* Data Out, No Response, not sent due to a LIP */
59 MPI_IOCLOGINFO_FC_TARGET_WRSP_KILLED_BY_LIP = 0x21000007, /* Auto-response after a write not sent due to a LIP */
60 MPI_IOCLOGINFO_FC_TARGET_DINR_KILLED_BY_LIP = 0x21000008, /* Data In, No Response, not completed due to a LIP */
61 MPI_IOCLOGINFO_FC_TARGET_DINR_MISSING_DATA = 0x21000009, /* Data In, No Response, missing data frames */
62 MPI_IOCLOGINFO_FC_TARGET_MRSP_KILLED_BY_LIP = 0x2100000a, /* Manual Response not sent due to a LIP */
63 MPI_IOCLOGINFO_FC_TARGET_NO_CLASS_3 = 0x2100000b, /* not sent because remote node does not support Class 3 */
64 MPI_IOCLOGINFO_FC_TARGET_LOGIN_NOT_VALID = 0x2100000c, /* not sent because login to remote node not validated */
65 MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND = 0x2100000e, /* cleared from the outbound queue after a logout */
66 MPI_IOCLOGINFO_FC_TARGET_WAITING_FOR_DATA_IN = 0x2100000f, /* cleared waiting for data after a logout */
67
68 MPI_IOCLOGINFO_FC_LAN_BASE = 0x22000000,
69 MPI_IOCLOGINFO_FC_LAN_TRANS_SGL_MISSING = 0x22000001, /* Transaction Context Sgl Missing */
70 MPI_IOCLOGINFO_FC_LAN_TRANS_WRONG_PLACE = 0x22000002, /* Transaction Context found before an EOB */
71 MPI_IOCLOGINFO_FC_LAN_TRANS_RES_BITS_SET = 0x22000003, /* Transaction Context value has reserved bits set */
72 MPI_IOCLOGINFO_FC_LAN_WRONG_SGL_FLAG = 0x22000004, /* Invalid SGL Flags */
73
74 MPI_IOCLOGINFO_FC_MSG_BASE = 0x23000000,
75
76 MPI_IOCLOGINFO_FC_LINK_BASE = 0x24000000,
77 MPI_IOCLOGINFO_FC_LINK_LOOP_INIT_TIMEOUT = 0x24000001, /* Loop initialization timed out */
78 MPI_IOCLOGINFO_FC_LINK_ALREADY_INITIALIZED = 0x24000002, /* Another system controller already initialized the loop */
79 MPI_IOCLOGINFO_FC_LINK_LINK_NOT_ESTABLISHED = 0x24000003, /* Not synchronized to signal or still negotiating (possible cable problem) */
80 MPI_IOCLOGINFO_FC_LINK_CRC_ERROR = 0x24000004, /* CRC check detected error on received frame */
81
82 MPI_IOCLOGINFO_FC_CTX_BASE = 0x25000000,
83
84 MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET = 0x26000000, /* The lower 24 bits give the byte offset of the field in the request message that is invalid */
85 MPI_IOCLOGINFO_FC_INVALID_FIELD_MAX_OFFSET = 0x26ffffff,
86
87 MPI_IOCLOGINFO_FC_STATE_CHANGE = 0x27000000 /* The lower 24 bits give additional information concerning state change */
88
89} MpiIocLogInfoFc_t;
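
The header comment in fc_log.h spells out the 0xabcccccc layout of an IOCLogInfo word: a type nibble (always 2 for Fibre Channel), a firmware subclass nibble, and a 24-bit subclass-specific code. A small decode helper written directly against that description; the struct and function names are illustrative and not part of the header:

#include <linux/types.h>

struct fc_loginfo_fields {      /* illustrative helper type */
        u8  type;               /* bits 31:28, 2 = Fibre Channel          */
        u8  subclass;           /* bits 27:24, 0 = FCP Init, 4 = FC Link  */
        u32 code;               /* bits 23:0, subclass-specific value     */
};

static void fc_decode_loginfo(u32 log_info, struct fc_loginfo_fields *out)
{
        out->type     = (log_info >> 28) & 0xF;
        out->subclass = (log_info >> 24) & 0xF;
        out->code     = log_info & 0x00FFFFFF;
}
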
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
new file mode 100644
index 000000000000..9dbb061265fe
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -0,0 +1,746 @@
1/*
2 * Copyright (c) 2000-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi.h
6 * Title: MPI Message independent structures and definitions
7 * Creation Date: July 27, 2000
8 *
9 * mpi.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
17 * 05-24-00 00.10.02 Added MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH definition.
18 * 06-06-00 01.00.01 Update MPI_VERSION_MAJOR and MPI_VERSION_MINOR.
19 * 06-22-00 01.00.02 Added MPI_IOCSTATUS_LAN_ definitions.
20 * Removed LAN_SUSPEND function definition.
21 * Added MPI_MSGFLAGS_CONTINUATION_REPLY definition.
22 * 06-30-00 01.00.03 Added MPI_CONTEXT_REPLY_TYPE_LAN definition.
23 * Added MPI_GET/SET_CONTEXT_REPLY_TYPE macros.
24 * 07-27-00 01.00.04 Added MPI_FAULT_ definitions.
25 * Removed MPI_IOCSTATUS_MSG/DATA_XFER_ERROR definitions.
26 * Added MPI_IOCSTATUS_INTERNAL_ERROR definition.
27 * Added MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH.
28 * 11-02-00 01.01.01 Original release for post 1.0 work.
29 * 12-04-00 01.01.02 Added new function codes.
30 * 01-09-01 01.01.03 Added more definitions to the system interface section
31 * Added MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT.
32 * 01-25-01 01.01.04 Changed MPI_VERSION_MINOR from 0x00 to 0x01.
33 * 02-20-01 01.01.05 Started using MPI_POINTER.
34 * Fixed value for MPI_DIAG_RW_ENABLE.
35 * Added defines for MPI_DIAG_PREVENT_IOC_BOOT and
36 * MPI_DIAG_CLEAR_FLASH_BAD_SIG.
37 * Obsoleted MPI_IOCSTATUS_TARGET_FC_ defines.
38 * 02-27-01 01.01.06 Removed MPI_HOST_INDEX_REGISTER define.
39 * Added function codes for RAID.
40 * 04-09-01 01.01.07 Added alternate define for MPI_DOORBELL_ACTIVE,
41 * MPI_DOORBELL_USED, to better match the spec.
42 * 08-08-01 01.02.01 Original release for v1.2 work.
43 * Changed MPI_VERSION_MINOR from 0x01 to 0x02.
44 * Added define MPI_FUNCTION_TOOLBOX.
45 * 09-28-01 01.02.02 New function code MPI_SCSI_ENCLOSURE_PROCESSOR.
46 * 11-01-01 01.02.03 Changed name to MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR.
47 * 03-14-02 01.02.04 Added MPI_HEADER_VERSION_ defines.
48 * 05-31-02 01.02.05 Bumped MPI_HEADER_VERSION_UNIT.
49 * 07-12-02 01.02.06 Added define for MPI_FUNCTION_MAILBOX.
50 * 09-16-02 01.02.07 Bumped value for MPI_HEADER_VERSION_UNIT.
51 * 11-15-02 01.02.08 Added define MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX and
52 * obsoleted define MPI_IOCSTATUS_TARGET_INVALID_IOCINDEX.
53 * 04-01-03 01.02.09 New IOCStatus code: MPI_IOCSTATUS_FC_EXCHANGE_CANCELED
54 * 06-26-03 01.02.10 Bumped MPI_HEADER_VERSION_UNIT value.
55 * --------------------------------------------------------------------------
56 */
57
58#ifndef MPI_H
59#define MPI_H
60
61
62/*****************************************************************************
63*
64* M P I V e r s i o n D e f i n i t i o n s
65*
66*****************************************************************************/
67
68#define MPI_VERSION_MAJOR (0x01)
69#define MPI_VERSION_MINOR (0x05)
70#define MPI_VERSION_MAJOR_MASK (0xFF00)
71#define MPI_VERSION_MAJOR_SHIFT (8)
72#define MPI_VERSION_MINOR_MASK (0x00FF)
73#define MPI_VERSION_MINOR_SHIFT (0)
74#define MPI_VERSION ((MPI_VERSION_MAJOR << MPI_VERSION_MAJOR_SHIFT) | \
75 MPI_VERSION_MINOR)
76
77#define MPI_VERSION_01_00 (0x0100)
78#define MPI_VERSION_01_01 (0x0101)
79#define MPI_VERSION_01_02 (0x0102)
80#define MPI_VERSION_01_03 (0x0103)
81#define MPI_VERSION_01_05 (0x0105)
82/* Note: The major versions of 0xe0 through 0xff are reserved */
83
84/* versioning for this MPI header set */
85#define MPI_HEADER_VERSION_UNIT (0x00)
86#define MPI_HEADER_VERSION_DEV (0x00)
87#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00)
88#define MPI_HEADER_VERSION_UNIT_SHIFT (8)
89#define MPI_HEADER_VERSION_DEV_MASK (0x00FF)
90#define MPI_HEADER_VERSION_DEV_SHIFT (0)
91#define MPI_HEADER_VERSION ((MPI_HEADER_VERSION_UNIT << 8) | MPI_HEADER_VERSION_DEV)
92
93/*****************************************************************************
94*
95* I O C S t a t e D e f i n i t i o n s
96*
97*****************************************************************************/
98
99#define MPI_IOC_STATE_RESET (0x00000000)
100#define MPI_IOC_STATE_READY (0x10000000)
101#define MPI_IOC_STATE_OPERATIONAL (0x20000000)
102#define MPI_IOC_STATE_FAULT (0x40000000)
103
104#define MPI_IOC_STATE_MASK (0xF0000000)
105#define MPI_IOC_STATE_SHIFT (28)
106
107/* Fault state codes (product independent range 0x8000-0xFFFF) */
108
109#define MPI_FAULT_REQUEST_MESSAGE_PCI_PARITY_ERROR (0x8111)
110#define MPI_FAULT_REQUEST_MESSAGE_PCI_BUS_FAULT (0x8112)
111#define MPI_FAULT_REPLY_MESSAGE_PCI_PARITY_ERROR (0x8113)
112#define MPI_FAULT_REPLY_MESSAGE_PCI_BUS_FAULT (0x8114)
113#define MPI_FAULT_DATA_SEND_PCI_PARITY_ERROR (0x8115)
114#define MPI_FAULT_DATA_SEND_PCI_BUS_FAULT (0x8116)
115#define MPI_FAULT_DATA_RECEIVE_PCI_PARITY_ERROR (0x8117)
116#define MPI_FAULT_DATA_RECEIVE_PCI_BUS_FAULT (0x8118)
117
118
119/*****************************************************************************
120*
121* P C I S y s t e m I n t e r f a c e R e g i s t e r s
122*
123*****************************************************************************/
124
125/* S y s t e m D o o r b e l l */
126#define MPI_DOORBELL_OFFSET (0x00000000)
127#define MPI_DOORBELL_ACTIVE (0x08000000) /* DoorbellUsed */
128#define MPI_DOORBELL_USED (MPI_DOORBELL_ACTIVE)
129#define MPI_DOORBELL_ACTIVE_SHIFT (27)
130#define MPI_DOORBELL_WHO_INIT_MASK (0x07000000)
131#define MPI_DOORBELL_WHO_INIT_SHIFT (24)
132#define MPI_DOORBELL_FUNCTION_MASK (0xFF000000)
133#define MPI_DOORBELL_FUNCTION_SHIFT (24)
134#define MPI_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
135#define MPI_DOORBELL_ADD_DWORDS_SHIFT (16)
136#define MPI_DOORBELL_DATA_MASK (0x0000FFFF)
137
138
139#define MPI_WRITE_SEQUENCE_OFFSET (0x00000004)
140#define MPI_WRSEQ_KEY_VALUE_MASK (0x0000000F)
141#define MPI_WRSEQ_1ST_KEY_VALUE (0x04)
142#define MPI_WRSEQ_2ND_KEY_VALUE (0x0B)
143#define MPI_WRSEQ_3RD_KEY_VALUE (0x02)
144#define MPI_WRSEQ_4TH_KEY_VALUE (0x07)
145#define MPI_WRSEQ_5TH_KEY_VALUE (0x0D)
146
147#define MPI_DIAGNOSTIC_OFFSET (0x00000008)
148#define MPI_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
149#define MPI_DIAG_PREVENT_IOC_BOOT (0x00000200)
150#define MPI_DIAG_DRWE (0x00000080)
151#define MPI_DIAG_FLASH_BAD_SIG (0x00000040)
152#define MPI_DIAG_RESET_HISTORY (0x00000020)
153#define MPI_DIAG_RW_ENABLE (0x00000010)
154#define MPI_DIAG_RESET_ADAPTER (0x00000004)
155#define MPI_DIAG_DISABLE_ARM (0x00000002)
156#define MPI_DIAG_MEM_ENABLE (0x00000001)
157
158#define MPI_TEST_BASE_ADDRESS_OFFSET (0x0000000C)
159
160#define MPI_DIAG_RW_DATA_OFFSET (0x00000010)
161
162#define MPI_DIAG_RW_ADDRESS_OFFSET (0x00000014)
163
164#define MPI_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
165#define MPI_HIS_IOP_DOORBELL_STATUS (0x80000000)
166#define MPI_HIS_REPLY_MESSAGE_INTERRUPT (0x00000008)
167#define MPI_HIS_DOORBELL_INTERRUPT (0x00000001)
168
169#define MPI_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
170#define MPI_HIM_RIM (0x00000008)
171#define MPI_HIM_DIM (0x00000001)
172
173#define MPI_REQUEST_QUEUE_OFFSET (0x00000040)
174#define MPI_REQUEST_POST_FIFO_OFFSET (0x00000040)
175
176#define MPI_REPLY_QUEUE_OFFSET (0x00000044)
177#define MPI_REPLY_POST_FIFO_OFFSET (0x00000044)
178#define MPI_REPLY_FREE_FIFO_OFFSET (0x00000044)
179
180#define MPI_HI_PRI_REQUEST_QUEUE_OFFSET (0x00000048)
181
182
183
184/*****************************************************************************
185*
186* M e s s a g e F r a m e D e s c r i p t o r s
187*
188*****************************************************************************/
189
190#define MPI_REQ_MF_DESCRIPTOR_NB_MASK (0x00000003)
191#define MPI_REQ_MF_DESCRIPTOR_F_BIT (0x00000004)
192#define MPI_REQ_MF_DESCRIPTOR_ADDRESS_MASK (0xFFFFFFF8)
193
194#define MPI_ADDRESS_REPLY_A_BIT (0x80000000)
195#define MPI_ADDRESS_REPLY_ADDRESS_MASK (0x7FFFFFFF)
196
197#define MPI_CONTEXT_REPLY_A_BIT (0x80000000)
198#define MPI_CONTEXT_REPLY_TYPE_MASK (0x60000000)
199#define MPI_CONTEXT_REPLY_TYPE_SCSI_INIT (0x00)
200#define MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET (0x01)
201#define MPI_CONTEXT_REPLY_TYPE_LAN (0x02)
202#define MPI_CONTEXT_REPLY_TYPE_SHIFT (29)
203#define MPI_CONTEXT_REPLY_CONTEXT_MASK (0x1FFFFFFF)
204
205
206/****************************************************************************/
207/* Context Reply macros */
208/****************************************************************************/
209
210#define MPI_GET_CONTEXT_REPLY_TYPE(x) (((x) & MPI_CONTEXT_REPLY_TYPE_MASK) \
211 >> MPI_CONTEXT_REPLY_TYPE_SHIFT)
212
213#define MPI_SET_CONTEXT_REPLY_TYPE(x, typ) \
214 ((x) = ((x) & ~MPI_CONTEXT_REPLY_TYPE_MASK) | \
215 (((typ) << MPI_CONTEXT_REPLY_TYPE_SHIFT) & \
216 MPI_CONTEXT_REPLY_TYPE_MASK))
217
218
219/*****************************************************************************
220*
221* M e s s a g e F u n c t i o n s
222* 0x80 -> 0x8F reserved for private message use per product
223*
224*
225*****************************************************************************/
226
227#define MPI_FUNCTION_SCSI_IO_REQUEST (0x00)
228#define MPI_FUNCTION_SCSI_TASK_MGMT (0x01)
229#define MPI_FUNCTION_IOC_INIT (0x02)
230#define MPI_FUNCTION_IOC_FACTS (0x03)
231#define MPI_FUNCTION_CONFIG (0x04)
232#define MPI_FUNCTION_PORT_FACTS (0x05)
233#define MPI_FUNCTION_PORT_ENABLE (0x06)
234#define MPI_FUNCTION_EVENT_NOTIFICATION (0x07)
235#define MPI_FUNCTION_EVENT_ACK (0x08)
236#define MPI_FUNCTION_FW_DOWNLOAD (0x09)
237#define MPI_FUNCTION_TARGET_CMD_BUFFER_POST (0x0A)
238#define MPI_FUNCTION_TARGET_ASSIST (0x0B)
239#define MPI_FUNCTION_TARGET_STATUS_SEND (0x0C)
240#define MPI_FUNCTION_TARGET_MODE_ABORT (0x0D)
241#define MPI_FUNCTION_FC_LINK_SRVC_BUF_POST (0x0E)
242#define MPI_FUNCTION_FC_LINK_SRVC_RSP (0x0F)
243#define MPI_FUNCTION_FC_EX_LINK_SRVC_SEND (0x10)
244#define MPI_FUNCTION_FC_ABORT (0x11)
245#define MPI_FUNCTION_FW_UPLOAD (0x12)
246#define MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND (0x13)
247#define MPI_FUNCTION_FC_PRIMITIVE_SEND (0x14)
248
249#define MPI_FUNCTION_RAID_ACTION (0x15)
250#define MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16)
251
252#define MPI_FUNCTION_TOOLBOX (0x17)
253
254#define MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18)
255
256#define MPI_FUNCTION_MAILBOX (0x19)
257
258#define MPI_FUNCTION_SMP_PASSTHROUGH (0x1A)
259#define MPI_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B)
260
261#define MPI_DIAG_BUFFER_POST (0x1D)
262#define MPI_DIAG_RELEASE (0x1E)
263
264#define MPI_FUNCTION_SCSI_IO_32 (0x1F)
265
266#define MPI_FUNCTION_LAN_SEND (0x20)
267#define MPI_FUNCTION_LAN_RECEIVE (0x21)
268#define MPI_FUNCTION_LAN_RESET (0x22)
269
270#define MPI_FUNCTION_INBAND_BUFFER_POST (0x28)
271#define MPI_FUNCTION_INBAND_SEND (0x29)
272#define MPI_FUNCTION_INBAND_RSP (0x2A)
273#define MPI_FUNCTION_INBAND_ABORT (0x2B)
274
275#define MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
276#define MPI_FUNCTION_IO_UNIT_RESET (0x41)
277#define MPI_FUNCTION_HANDSHAKE (0x42)
278#define MPI_FUNCTION_REPLY_FRAME_REMOVAL (0x43)
279
280
281/* standard version format */
282typedef struct _MPI_VERSION_STRUCT
283{
284 U8 Dev; /* 00h */
285 U8 Unit; /* 01h */
286 U8 Minor; /* 02h */
287 U8 Major; /* 03h */
288} MPI_VERSION_STRUCT, MPI_POINTER PTR_MPI_VERSION_STRUCT,
289 MpiVersionStruct_t, MPI_POINTER pMpiVersionStruct;
290
291typedef union _MPI_VERSION_FORMAT
292{
293 MPI_VERSION_STRUCT Struct;
294 U32 Word;
295} MPI_VERSION_FORMAT, MPI_POINTER PTR_MPI_VERSION_FORMAT,
296 MpiVersionFormat_t, MPI_POINTER pMpiVersionFormat_t;
297
298
299/*****************************************************************************
300*
301* S c a t t e r G a t h e r E l e m e n t s
302*
303*****************************************************************************/
304
305/****************************************************************************/
306/* Simple element structures */
307/****************************************************************************/
308
309typedef struct _SGE_SIMPLE32
310{
311 U32 FlagsLength;
312 U32 Address;
313} SGE_SIMPLE32, MPI_POINTER PTR_SGE_SIMPLE32,
314 SGESimple32_t, MPI_POINTER pSGESimple32_t;
315
316typedef struct _SGE_SIMPLE64
317{
318 U32 FlagsLength;
319 U64 Address;
320} SGE_SIMPLE64, MPI_POINTER PTR_SGE_SIMPLE64,
321 SGESimple64_t, MPI_POINTER pSGESimple64_t;
322
323typedef struct _SGE_SIMPLE_UNION
324{
325 U32 FlagsLength;
326 union
327 {
328 U32 Address32;
329 U64 Address64;
330 }u;
331} SGESimpleUnion_t, MPI_POINTER pSGESimpleUnion_t,
332 SGE_SIMPLE_UNION, MPI_POINTER PTR_SGE_SIMPLE_UNION;
333
334/****************************************************************************/
335/* Chain element structures */
336/****************************************************************************/
337
338typedef struct _SGE_CHAIN32
339{
340 U16 Length;
341 U8 NextChainOffset;
342 U8 Flags;
343 U32 Address;
344} SGE_CHAIN32, MPI_POINTER PTR_SGE_CHAIN32,
345 SGEChain32_t, MPI_POINTER pSGEChain32_t;
346
347typedef struct _SGE_CHAIN64
348{
349 U16 Length;
350 U8 NextChainOffset;
351 U8 Flags;
352 U64 Address;
353} SGE_CHAIN64, MPI_POINTER PTR_SGE_CHAIN64,
354 SGEChain64_t, MPI_POINTER pSGEChain64_t;
355
356typedef struct _SGE_CHAIN_UNION
357{
358 U16 Length;
359 U8 NextChainOffset;
360 U8 Flags;
361 union
362 {
363 U32 Address32;
364 U64 Address64;
365 }u;
366} SGE_CHAIN_UNION, MPI_POINTER PTR_SGE_CHAIN_UNION,
367 SGEChainUnion_t, MPI_POINTER pSGEChainUnion_t;
368
369/****************************************************************************/
370/* Transaction Context element */
371/****************************************************************************/
372
373typedef struct _SGE_TRANSACTION32
374{
375 U8 Reserved;
376 U8 ContextSize;
377 U8 DetailsLength;
378 U8 Flags;
379 U32 TransactionContext[1];
380 U32 TransactionDetails[1];
381} SGE_TRANSACTION32, MPI_POINTER PTR_SGE_TRANSACTION32,
382 SGETransaction32_t, MPI_POINTER pSGETransaction32_t;
383
384typedef struct _SGE_TRANSACTION64
385{
386 U8 Reserved;
387 U8 ContextSize;
388 U8 DetailsLength;
389 U8 Flags;
390 U32 TransactionContext[2];
391 U32 TransactionDetails[1];
392} SGE_TRANSACTION64, MPI_POINTER PTR_SGE_TRANSACTION64,
393 SGETransaction64_t, MPI_POINTER pSGETransaction64_t;
394
395typedef struct _SGE_TRANSACTION96
396{
397 U8 Reserved;
398 U8 ContextSize;
399 U8 DetailsLength;
400 U8 Flags;
401 U32 TransactionContext[3];
402 U32 TransactionDetails[1];
403} SGE_TRANSACTION96, MPI_POINTER PTR_SGE_TRANSACTION96,
404 SGETransaction96_t, MPI_POINTER pSGETransaction96_t;
405
406typedef struct _SGE_TRANSACTION128
407{
408 U8 Reserved;
409 U8 ContextSize;
410 U8 DetailsLength;
411 U8 Flags;
412 U32 TransactionContext[4];
413 U32 TransactionDetails[1];
414} SGE_TRANSACTION128, MPI_POINTER PTR_SGE_TRANSACTION128,
415 SGETransaction_t128, MPI_POINTER pSGETransaction_t128;
416
417typedef struct _SGE_TRANSACTION_UNION
418{
419 U8 Reserved;
420 U8 ContextSize;
421 U8 DetailsLength;
422 U8 Flags;
423 union
424 {
425 U32 TransactionContext32[1];
426 U32 TransactionContext64[2];
427 U32 TransactionContext96[3];
428 U32 TransactionContext128[4];
429 }u;
430 U32 TransactionDetails[1];
431} SGE_TRANSACTION_UNION, MPI_POINTER PTR_SGE_TRANSACTION_UNION,
432 SGETransactionUnion_t, MPI_POINTER pSGETransactionUnion_t;
433
434
435/****************************************************************************/
436/* SGE IO types union for IO SGL's */
437/****************************************************************************/
438
439typedef struct _SGE_IO_UNION
440{
441 union
442 {
443 SGE_SIMPLE_UNION Simple;
444 SGE_CHAIN_UNION Chain;
445 } u;
446} SGE_IO_UNION, MPI_POINTER PTR_SGE_IO_UNION,
447 SGEIOUnion_t, MPI_POINTER pSGEIOUnion_t;
448
449/****************************************************************************/
450/* SGE union for SGL's with Simple and Transaction elements */
451/****************************************************************************/
452
453typedef struct _SGE_TRANS_SIMPLE_UNION
454{
455 union
456 {
457 SGE_SIMPLE_UNION Simple;
458 SGE_TRANSACTION_UNION Transaction;
459 } u;
460} SGE_TRANS_SIMPLE_UNION, MPI_POINTER PTR_SGE_TRANS_SIMPLE_UNION,
461 SGETransSimpleUnion_t, MPI_POINTER pSGETransSimpleUnion_t;
462
463/****************************************************************************/
464/* All SGE types union */
465/****************************************************************************/
466
467typedef struct _SGE_MPI_UNION
468{
469 union
470 {
471 SGE_SIMPLE_UNION Simple;
472 SGE_CHAIN_UNION Chain;
473 SGE_TRANSACTION_UNION Transaction;
474 } u;
475} SGE_MPI_UNION, MPI_POINTER PTR_SGE_MPI_UNION,
476 MPI_SGE_UNION_t, MPI_POINTER pMPI_SGE_UNION_t,
477 SGEAllUnion_t, MPI_POINTER pSGEAllUnion_t;
478
479
480/****************************************************************************/
481/* SGE field definition and masks */
482/****************************************************************************/
483
484/* Flags field bit definitions */
485
486#define MPI_SGE_FLAGS_LAST_ELEMENT (0x80)
487#define MPI_SGE_FLAGS_END_OF_BUFFER (0x40)
488#define MPI_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
489#define MPI_SGE_FLAGS_LOCAL_ADDRESS (0x08)
490#define MPI_SGE_FLAGS_DIRECTION (0x04)
491#define MPI_SGE_FLAGS_ADDRESS_SIZE (0x02)
492#define MPI_SGE_FLAGS_END_OF_LIST (0x01)
493
494#define MPI_SGE_FLAGS_SHIFT (24)
495
496#define MPI_SGE_LENGTH_MASK (0x00FFFFFF)
497#define MPI_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
498
499/* Element Type */
500
501#define MPI_SGE_FLAGS_TRANSACTION_ELEMENT (0x00)
502#define MPI_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
503#define MPI_SGE_FLAGS_CHAIN_ELEMENT (0x30)
504#define MPI_SGE_FLAGS_ELEMENT_MASK (0x30)
505
506/* Address location */
507
508#define MPI_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
509
510/* Direction */
511
512#define MPI_SGE_FLAGS_IOC_TO_HOST (0x00)
513#define MPI_SGE_FLAGS_HOST_TO_IOC (0x04)
514
515/* Address Size */
516
517#define MPI_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
518#define MPI_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
519
520/* Context Size */
521
522#define MPI_SGE_FLAGS_32_BIT_CONTEXT (0x00)
523#define MPI_SGE_FLAGS_64_BIT_CONTEXT (0x02)
524#define MPI_SGE_FLAGS_96_BIT_CONTEXT (0x04)
525#define MPI_SGE_FLAGS_128_BIT_CONTEXT (0x06)
526
527#define MPI_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
528#define MPI_SGE_CHAIN_OFFSET_SHIFT (16)
529
530
531/****************************************************************************/
532/* SGE operation Macros */
533/****************************************************************************/
534
535 /* SIMPLE FlagsLength manipulations... */
536#define MPI_SGE_SET_FLAGS(f) ((U32)(f) << MPI_SGE_FLAGS_SHIFT)
537#define MPI_SGE_GET_FLAGS(fl) (((fl) & ~MPI_SGE_LENGTH_MASK) >> MPI_SGE_FLAGS_SHIFT)
538#define MPI_SGE_LENGTH(fl) ((fl) & MPI_SGE_LENGTH_MASK)
539#define MPI_SGE_CHAIN_LENGTH(fl) ((fl) & MPI_SGE_CHAIN_LENGTH_MASK)
540
541#define MPI_SGE_SET_FLAGS_LENGTH(f,l) (MPI_SGE_SET_FLAGS(f) | MPI_SGE_LENGTH(l))
542
543#define MPI_pSGE_GET_FLAGS(psg) MPI_SGE_GET_FLAGS((psg)->FlagsLength)
544#define MPI_pSGE_GET_LENGTH(psg) MPI_SGE_LENGTH((psg)->FlagsLength)
545#define MPI_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI_SGE_SET_FLAGS_LENGTH(f,l)
546 /* CAUTION - The following are READ-MODIFY-WRITE! */
547#define MPI_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI_SGE_SET_FLAGS(f)
548#define MPI_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI_SGE_LENGTH(l)
549
550#define MPI_GET_CHAIN_OFFSET(x) ((x&MPI_SGE_CHAIN_OFFSET_MASK)>>MPI_SGE_CHAIN_OFFSET_SHIFT)
551
552
553
554/*****************************************************************************
555*
556* S t a n d a r d M e s s a g e S t r u c t u r e s
557*
558*****************************************************************************/
559
560/****************************************************************************/
561/* Standard message request header for all request messages */
562/****************************************************************************/
563
564typedef struct _MSG_REQUEST_HEADER
565{
566 U8 Reserved[2]; /* function specific */
567 U8 ChainOffset;
568 U8 Function;
569 U8 Reserved1[3]; /* function specific */
570 U8 MsgFlags;
571 U32 MsgContext;
572} MSG_REQUEST_HEADER, MPI_POINTER PTR_MSG_REQUEST_HEADER,
573 MPIHeader_t, MPI_POINTER pMPIHeader_t;
574
575
576/****************************************************************************/
577/* Default Reply */
578/****************************************************************************/
579
580typedef struct _MSG_DEFAULT_REPLY
581{
582 U8 Reserved[2]; /* function specific */
583 U8 MsgLength;
584 U8 Function;
585 U8 Reserved1[3]; /* function specific */
586 U8 MsgFlags;
587 U32 MsgContext;
588 U8 Reserved2[2]; /* function specific */
589 U16 IOCStatus;
590 U32 IOCLogInfo;
591} MSG_DEFAULT_REPLY, MPI_POINTER PTR_MSG_DEFAULT_REPLY,
592 MPIDefaultReply_t, MPI_POINTER pMPIDefaultReply_t;
593
594
595/* MsgFlags definition for all replies */
596
597#define MPI_MSGFLAGS_CONTINUATION_REPLY (0x80)
598
599
600/*****************************************************************************
601*
602* I O C S t a t u s V a l u e s
603*
604*****************************************************************************/
605
606/****************************************************************************/
607/* Common IOCStatus values for all replies */
608/****************************************************************************/
609
610#define MPI_IOCSTATUS_SUCCESS (0x0000)
611#define MPI_IOCSTATUS_INVALID_FUNCTION (0x0001)
612#define MPI_IOCSTATUS_BUSY (0x0002)
613#define MPI_IOCSTATUS_INVALID_SGL (0x0003)
614#define MPI_IOCSTATUS_INTERNAL_ERROR (0x0004)
615#define MPI_IOCSTATUS_RESERVED (0x0005)
616#define MPI_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
617#define MPI_IOCSTATUS_INVALID_FIELD (0x0007)
618#define MPI_IOCSTATUS_INVALID_STATE (0x0008)
619#define MPI_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
620
621/****************************************************************************/
622/* Config IOCStatus values */
623/****************************************************************************/
624
625#define MPI_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
626#define MPI_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
627#define MPI_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
628#define MPI_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
629#define MPI_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
630#define MPI_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
631
632/****************************************************************************/
633/* SCSIIO Reply (SPI & FCP) initiator values */
634/****************************************************************************/
635
636#define MPI_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
637#define MPI_IOCSTATUS_SCSI_INVALID_BUS (0x0041)
638#define MPI_IOCSTATUS_SCSI_INVALID_TARGETID (0x0042)
639#define MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
640#define MPI_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
641#define MPI_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
642#define MPI_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
643#define MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
644#define MPI_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
645#define MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
646#define MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
647#define MPI_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
648#define MPI_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
649
650/****************************************************************************/
651/* For use by SCSI Initiator and SCSI Target end-to-end data protection */
652/****************************************************************************/
653
654#define MPI_IOCSTATUS_EEDP_CRC_ERROR (0x004D)
655#define MPI_IOCSTATUS_EEDP_LBA_TAG_ERROR (0x004E)
656#define MPI_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
657
658
659/****************************************************************************/
660/* SCSI (SPI & FCP) target values */
661/****************************************************************************/
662
663#define MPI_IOCSTATUS_TARGET_PRIORITY_IO (0x0060)
664#define MPI_IOCSTATUS_TARGET_INVALID_PORT (0x0061)
665#define MPI_IOCSTATUS_TARGET_INVALID_IOCINDEX (0x0062) /* obsolete */
666#define MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
667#define MPI_IOCSTATUS_TARGET_ABORTED (0x0063)
668#define MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
669#define MPI_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
670#define MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
671#define MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT (0x006B)
672
673/****************************************************************************/
674/* Additional FCP target values (obsolete) */
675/****************************************************************************/
676
677#define MPI_IOCSTATUS_TARGET_FC_ABORTED (0x0066) /* obsolete */
678#define MPI_IOCSTATUS_TARGET_FC_RX_ID_INVALID (0x0067) /* obsolete */
679#define MPI_IOCSTATUS_TARGET_FC_DID_INVALID (0x0068) /* obsolete */
680#define MPI_IOCSTATUS_TARGET_FC_NODE_LOGGED_OUT (0x0069) /* obsolete */
681
682/****************************************************************************/
683/* Fibre Channel Direct Access values */
684/****************************************************************************/
685
686#define MPI_IOCSTATUS_FC_ABORTED (0x0066)
687#define MPI_IOCSTATUS_FC_RX_ID_INVALID (0x0067)
688#define MPI_IOCSTATUS_FC_DID_INVALID (0x0068)
689#define MPI_IOCSTATUS_FC_NODE_LOGGED_OUT (0x0069)
690#define MPI_IOCSTATUS_FC_EXCHANGE_CANCELED (0x006C)
691
692/****************************************************************************/
693/* LAN values */
694/****************************************************************************/
695
696#define MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND (0x0080)
697#define MPI_IOCSTATUS_LAN_DEVICE_FAILURE (0x0081)
698#define MPI_IOCSTATUS_LAN_TRANSMIT_ERROR (0x0082)
699#define MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED (0x0083)
700#define MPI_IOCSTATUS_LAN_RECEIVE_ERROR (0x0084)
701#define MPI_IOCSTATUS_LAN_RECEIVE_ABORTED (0x0085)
702#define MPI_IOCSTATUS_LAN_PARTIAL_PACKET (0x0086)
703#define MPI_IOCSTATUS_LAN_CANCELED (0x0087)
704
705/****************************************************************************/
706/* Serial Attached SCSI values */
707/****************************************************************************/
708
709#define MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
710
711/****************************************************************************/
712/* Inband values */
713/****************************************************************************/
714
715#define MPI_IOCSTATUS_INBAND_ABORTED (0x0098)
716#define MPI_IOCSTATUS_INBAND_NO_CONNECTION (0x0099)
717
718/****************************************************************************/
719/* Diagnostic Tools values */
720/****************************************************************************/
721
722#define MPI_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
723
724
725/****************************************************************************/
726/* IOCStatus flag to indicate that log info is available */
727/****************************************************************************/
728
729#define MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
730#define MPI_IOCSTATUS_MASK (0x7FFF)
731
732/****************************************************************************/
733/* LogInfo Types */
734/****************************************************************************/
735
736#define MPI_IOCLOGINFO_TYPE_MASK (0xF0000000)
737#define MPI_IOCLOGINFO_TYPE_SHIFT (28)
738#define MPI_IOCLOGINFO_TYPE_NONE (0x0)
739#define MPI_IOCLOGINFO_TYPE_SCSI (0x1)
740#define MPI_IOCLOGINFO_TYPE_FC (0x2)
741#define MPI_IOCLOGINFO_TYPE_SAS (0x3)
742#define MPI_IOCLOGINFO_TYPE_ISCSI (0x4)
743#define MPI_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
744
745
746#endif
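
mpi.h above defines both the scatter-gather FlagsLength macros and the MSG_DEFAULT_REPLY conventions for IOCStatus and IOCLogInfo. Two short sketches that use only those definitions; the function names and the printk reporting are illustrative, and mpi.h, mpi_type.h (for the U16/U32 types), and <linux/kernel.h> are assumed to be included:

/* 1) Fill a 32-bit simple SGE that also terminates the list, composing
 *    the flags and length with the macros defined above.
 */
static void mpi_fill_last_sge32(SGE_SIMPLE32 *sge, U32 dma_addr, U32 len)
{
        U32 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI_SGE_FLAGS_LAST_ELEMENT |
                    MPI_SGE_FLAGS_END_OF_BUFFER |
                    MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_32_BIT_ADDRESSING |
                    MPI_SGE_FLAGS_HOST_TO_IOC;

        sge->FlagsLength = MPI_SGE_SET_FLAGS_LENGTH(flags, len);
        sge->Address     = dma_addr;
}

/* 2) Extract the real status from a default reply: strip the log-info
 *    flag bit, and when log info is flagged, report its type and data.
 */
static U16 mpi_reply_status(MSG_DEFAULT_REPLY *reply)
{
        if (reply->IOCStatus & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
                printk(KERN_INFO "mpi: loginfo type %d, data 0x%07x\n",
                       (int)((reply->IOCLogInfo & MPI_IOCLOGINFO_TYPE_MASK)
                             >> MPI_IOCLOGINFO_TYPE_SHIFT),
                       reply->IOCLogInfo & MPI_IOCLOGINFO_LOG_DATA_MASK);

        return reply->IOCStatus & MPI_IOCSTATUS_MASK;
}
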
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
new file mode 100644
index 000000000000..a5680d864bf0
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -0,0 +1,2105 @@
1/*
2 * Copyright (c) 2000-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_cnfg.h
6 * Title: MPI Config message, structures, and Pages
7 * Creation Date: July 27, 2000
8 *
9 * mpi_cnfg.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
17 * 06-06-00 01.00.01 Update version number for 1.0 release.
18 * 06-08-00 01.00.02 Added _PAGEVERSION definitions for all pages.
19 * Added FcPhLowestVersion, FcPhHighestVersion, Reserved2
20 * fields to FC_DEVICE_0 page, updated the page version.
21 * Changed _FREE_RUNNING_CLOCK to _PACING_TRANSFERS in
22 * SCSI_PORT_0, SCSI_DEVICE_0 and SCSI_DEVICE_1 pages
23 * and updated the page versions.
24 * Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
25 * page and updated the page version.
26 * Added Information field and _INFO_PARAMS_NEGOTIATED
27 * definition to SCSI_DEVICE_0 page.
28 * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
29 * page version.
30 * Added BucketsRemaining to LAN_1 page, redefined the
31 * state values, and updated the page version.
32 * Revised bus width definitions in SCSI_PORT_0,
33 * SCSI_DEVICE_0 and SCSI_DEVICE_1 pages.
34 * 06-30-00 01.00.04 Added MaxReplySize to LAN_1 page and updated the page
35 * version.
36 * Moved FC_DEVICE_0 PageAddress description to spec.
37 * 07-27-00 01.00.05 Corrected the SubsystemVendorID and SubsystemID field
38 * widths in IOC_0 page and updated the page version.
39 * 11-02-00 01.01.01 Original release for post 1.0 work
40 * Added Manufacturing pages, IO Unit Page 2, SCSI SPI
41 * Port Page 2, FC Port Page 4, FC Port Page 5
42 * 11-15-00 01.01.02 Interim changes to match proposals
43 * 12-04-00 01.01.03 Config page changes to match MPI rev 1.00.01.
44 * 12-05-00 01.01.04 Modified config page actions.
45 * 01-09-01 01.01.05 Added defines for page address formats.
46 * Data size for Manufacturing pages 2 and 3 no longer
47 * defined here.
48 * Io Unit Page 2 size is fixed at 4 adapters and some
49 * flags were changed.
50 * SCSI Port Page 2 Device Settings modified.
51 * New fields added to FC Port Page 0 and some flags
52 * cleaned up.
53 * Removed impedance flash from FC Port Page 1.
54 * Added FC Port pages 6 and 7.
55 * 01-25-01 01.01.06 Added MaxInitiators field to FcPortPage0.
56 * 01-29-01 01.01.07 Changed some defines to make them 32 character unique.
57 * Added some LinkType defines for FcPortPage0.
58 * 02-20-01 01.01.08 Started using MPI_POINTER.
59 * 02-27-01 01.01.09 Replaced MPI_CONFIG_PAGETYPE_SCSI_LUN with
60 * MPI_CONFIG_PAGETYPE_RAID_VOLUME.
61 * Added definitions and structures for IOC Page 2 and
62 * RAID Volume Page 2.
63 * 03-27-01 01.01.10 Added CONFIG_PAGE_FC_PORT_8 and CONFIG_PAGE_FC_PORT_9.
64 * CONFIG_PAGE_FC_PORT_3 now supports persistent by DID.
65 * Added VendorId and ProductRevLevel fields to
66 * RAIDVOL2_IM_PHYS_ID struct.
67 * Modified values for MPI_FCPORTPAGE0_FLAGS_ATTACH_
68 * defines to make them compatible to MPI version 1.0.
69 * Added structure offset comments.
70 * 04-09-01 01.01.11 Added some new defines for the PageAddress field and
71 * removed some obsolete ones.
72 * Added IO Unit Page 3.
73 * Modified defines for Scsi Port Page 2.
74 * Modified RAID Volume Pages.
75 * 08-08-01 01.02.01 Original release for v1.2 work.
76 * Added SepID and SepBus to RVP2 IMPhysicalDisk struct.
77 * Added defines for the SEP bits in RVP2 VolumeSettings.
78 * Modified the DeviceSettings field in RVP2 to use the
79 * proper structure.
80 * Added defines for SES, SAF-TE, and cross channel for
81 * IOCPage2 CapabilitiesFlags.
82 * Removed define for MPI_IOUNITPAGE2_FLAGS_RAID_DISABLE.
83 * Removed define for
84 * MPI_SCSIPORTPAGE2_PORT_FLAGS_PARITY_ENABLE.
85 * Added define for MPI_CONFIG_PAGEATTR_RO_PERSISTENT.
86 * 08-29-01 01.02.02 Fixed value for MPI_MANUFACTPAGE_DEVID_53C1035.
87 * Added defines for MPI_FCPORTPAGE1_FLAGS_HARD_ALPA_ONLY
88 * and MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY.
89 * Removed MPI_SCSIPORTPAGE0_CAP_PACING_TRANSFERS,
90 * MPI_SCSIDEVPAGE0_NP_PACING_TRANSFERS, and
91 * MPI_SCSIDEVPAGE1_RP_PACING_TRANSFERS, and
92 * MPI_SCSIDEVPAGE1_CONF_PPR_ALLOWED.
93 * Added defines for MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED
94 * and MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED.
95 * Added OnBusTimerValue to CONFIG_PAGE_SCSI_PORT_1.
96 * Added rejected bits to SCSI Device Page 0 Information.
97 * Increased size of ALPA array in FC Port Page 2 by one
98 * and removed a one byte reserved field.
99 * 09-28-01 01.02.03 Swapped NegWireSpeedLow and NegWireSpeedLow in
100 * CONFIG_PAGE_LAN_1 to match preferred 64-bit ordering.
101 * Added structures for Manufacturing Page 4, IO Unit
102 * Page 3, IOC Page 3, IOC Page 4, RAID Volume Page 0, and
103 * RAID PhysDisk Page 0.
104 * 10-04-01 01.02.04 Added define for MPI_CONFIG_PAGETYPE_RAID_PHYSDISK.
105 * Modified some of the new defines to make them 32
106 * character unique.
107 * Modified how variable length pages (arrays) are defined.
108 * Added generic defines for hot spare pools and RAID
109 * volume types.
110 * 11-01-01 01.02.05 Added define for MPI_IOUNITPAGE1_DISABLE_IR.
111 * 03-14-02 01.02.06 Added PCISlotNum field to CONFIG_PAGE_IOC_1 along with
112 * related define, and bumped the page version define.
113 * 05-31-02 01.02.07 Added a Flags field to CONFIG_PAGE_IOC_2_RAID_VOL in a
114 * reserved byte and added a define.
115 * Added define for
116 * MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE.
117 * Added new config page: CONFIG_PAGE_IOC_5.
118 * Added MaxAliases, MaxHardAliases, and NumCurrentAliases
119 * fields to CONFIG_PAGE_FC_PORT_0.
120 * Added AltConnector and NumRequestedAliases fields to
121 * CONFIG_PAGE_FC_PORT_1.
122 * Added new config page: CONFIG_PAGE_FC_PORT_10.
123 * 07-12-02 01.02.08 Added more MPI_MANUFACTPAGE_DEVID_ defines.
124 * Added additional MPI_SCSIDEVPAGE0_NP_ defines.
125 * Added more MPI_SCSIDEVPAGE1_RP_ defines.
126 * Added define for
127 * MPI_SCSIDEVPAGE1_CONF_EXTENDED_PARAMS_ENABLE.
128 * Added new config page: CONFIG_PAGE_SCSI_DEVICE_3.
129 * Modified MPI_FCPORTPAGE5_FLAGS_ defines.
130 * 09-16-02 01.02.09 Added MPI_SCSIDEVPAGE1_CONF_FORCE_PPR_MSG define.
131 * 11-15-02 01.02.10 Added ConnectedID defines for CONFIG_PAGE_SCSI_PORT_0.
132 * Added more Flags defines for CONFIG_PAGE_FC_PORT_1.
133 * Added more Flags defines for CONFIG_PAGE_FC_DEVICE_0.
134 * 04-01-03 01.02.11 Added RR_TOV field and additional Flags defines for
135 * CONFIG_PAGE_FC_PORT_1.
136 * Added define MPI_FCPORTPAGE5_FLAGS_DISABLE to disable
137 * an alias.
138 * Added more device id defines.
139 * 06-26-03 01.02.12 Added MPI_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID define.
140 * Added TargetConfig and IDConfig fields to
141 * CONFIG_PAGE_SCSI_PORT_1.
142 * Added more PortFlags defines for CONFIG_PAGE_SCSI_PORT_2
143 * to control DV.
144 * Added more Flags defines for CONFIG_PAGE_FC_PORT_1.
145 * In CONFIG_PAGE_FC_DEVICE_0, replaced Reserved1 field
146 * with ADISCHardALPA.
147 * Added MPI_FC_DEVICE_PAGE0_PROT_FCP_RETRY define.
148 * --------------------------------------------------------------------------
149 */
150
151#ifndef MPI_CNFG_H
152#define MPI_CNFG_H
153
154
155/*****************************************************************************
156*
157* C o n f i g M e s s a g e a n d S t r u c t u r e s
158*
159*****************************************************************************/
160
161typedef struct _CONFIG_PAGE_HEADER
162{
163 U8 PageVersion; /* 00h */
164 U8 PageLength; /* 01h */
165 U8 PageNumber; /* 02h */
166 U8 PageType; /* 03h */
167} fCONFIG_PAGE_HEADER, MPI_POINTER PTR_CONFIG_PAGE_HEADER,
168 ConfigPageHeader_t, MPI_POINTER pConfigPageHeader_t;
169
170typedef union _CONFIG_PAGE_HEADER_UNION
171{
172 ConfigPageHeader_t Struct;
173 U8 Bytes[4];
174 U16 Word16[2];
175 U32 Word32;
176} ConfigPageHeaderUnion, MPI_POINTER pConfigPageHeaderUnion,
177 fCONFIG_PAGE_HEADER_UNION, MPI_POINTER PTR_CONFIG_PAGE_HEADER_UNION;
178
179typedef struct _CONFIG_EXTENDED_PAGE_HEADER
180{
181 U8 PageVersion; /* 00h */
182 U8 Reserved1; /* 01h */
183 U8 PageNumber; /* 02h */
184 U8 PageType; /* 03h */
185 U16 ExtPageLength; /* 04h */
186 U8 ExtPageType; /* 06h */
187 U8 Reserved2; /* 07h */
188} fCONFIG_EXTENDED_PAGE_HEADER, MPI_POINTER PTR_CONFIG_EXTENDED_PAGE_HEADER,
189 ConfigExtendedPageHeader_t, MPI_POINTER pConfigExtendedPageHeader_t;
190
191
192
193/****************************************************************************
194* PageType field values
195****************************************************************************/
196#define MPI_CONFIG_PAGEATTR_READ_ONLY (0x00)
197#define MPI_CONFIG_PAGEATTR_CHANGEABLE (0x10)
198#define MPI_CONFIG_PAGEATTR_PERSISTENT (0x20)
199#define MPI_CONFIG_PAGEATTR_RO_PERSISTENT (0x30)
200#define MPI_CONFIG_PAGEATTR_MASK (0xF0)
201
202#define MPI_CONFIG_PAGETYPE_IO_UNIT (0x00)
203#define MPI_CONFIG_PAGETYPE_IOC (0x01)
204#define MPI_CONFIG_PAGETYPE_BIOS (0x02)
205#define MPI_CONFIG_PAGETYPE_SCSI_PORT (0x03)
206#define MPI_CONFIG_PAGETYPE_SCSI_DEVICE (0x04)
207#define MPI_CONFIG_PAGETYPE_FC_PORT (0x05)
208#define MPI_CONFIG_PAGETYPE_FC_DEVICE (0x06)
209#define MPI_CONFIG_PAGETYPE_LAN (0x07)
210#define MPI_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
211#define MPI_CONFIG_PAGETYPE_MANUFACTURING (0x09)
212#define MPI_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
213#define MPI_CONFIG_PAGETYPE_INBAND (0x0B)
214#define MPI_CONFIG_PAGETYPE_EXTENDED (0x0F)
215#define MPI_CONFIG_PAGETYPE_MASK (0x0F)
216
217#define MPI_CONFIG_TYPENUM_MASK (0x0FFF)
218
219
220/****************************************************************************
221* ExtPageType field values
222****************************************************************************/
223#define MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
224#define MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
225#define MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
226#define MPI_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
227
228
229/****************************************************************************
230* PageAddress field values
231****************************************************************************/
232#define MPI_SCSI_PORT_PGAD_PORT_MASK (0x000000FF)
233
234#define MPI_SCSI_DEVICE_TARGET_ID_MASK (0x000000FF)
235#define MPI_SCSI_DEVICE_TARGET_ID_SHIFT (0)
236#define MPI_SCSI_DEVICE_BUS_MASK (0x0000FF00)
237#define MPI_SCSI_DEVICE_BUS_SHIFT (8)
238
239#define MPI_FC_PORT_PGAD_PORT_MASK (0xF0000000)
240#define MPI_FC_PORT_PGAD_PORT_SHIFT (28)
241#define MPI_FC_PORT_PGAD_FORM_MASK (0x0F000000)
242#define MPI_FC_PORT_PGAD_FORM_INDEX (0x01000000)
243#define MPI_FC_PORT_PGAD_INDEX_MASK (0x0000FFFF)
244#define MPI_FC_PORT_PGAD_INDEX_SHIFT (0)
245
246#define MPI_FC_DEVICE_PGAD_PORT_MASK (0xF0000000)
247#define MPI_FC_DEVICE_PGAD_PORT_SHIFT (28)
248#define MPI_FC_DEVICE_PGAD_FORM_MASK (0x0F000000)
249#define MPI_FC_DEVICE_PGAD_FORM_NEXT_DID (0x00000000)
250#define MPI_FC_DEVICE_PGAD_ND_PORT_MASK (0xF0000000)
251#define MPI_FC_DEVICE_PGAD_ND_PORT_SHIFT (28)
252#define MPI_FC_DEVICE_PGAD_ND_DID_MASK (0x00FFFFFF)
253#define MPI_FC_DEVICE_PGAD_ND_DID_SHIFT (0)
254#define MPI_FC_DEVICE_PGAD_FORM_BUS_TID (0x01000000)
255#define MPI_FC_DEVICE_PGAD_BT_BUS_MASK (0x0000FF00)
256#define MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT (8)
257#define MPI_FC_DEVICE_PGAD_BT_TID_MASK (0x000000FF)
258#define MPI_FC_DEVICE_PGAD_BT_TID_SHIFT (0)
259
260#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF)
261#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_SHIFT (0)
262
263#define MPI_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000)
264#define MPI_SAS_DEVICE_PGAD_FORM_SHIFT (28)
265#define MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
266#define MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID (0x00000001)
267#define MPI_SAS_DEVICE_PGAD_FORM_HANDLE (0x00000002)
268#define MPI_SAS_DEVICE_PGAD_GNH_HANDLE_MASK (0x0000FFFF)
269#define MPI_SAS_DEVICE_PGAD_GNH_HANDLE_SHIFT (0)
270#define MPI_SAS_DEVICE_PGAD_BT_BUS_MASK (0x0000FF00)
271#define MPI_SAS_DEVICE_PGAD_BT_BUS_SHIFT (8)
272#define MPI_SAS_DEVICE_PGAD_BT_TID_MASK (0x000000FF)
273#define MPI_SAS_DEVICE_PGAD_BT_TID_SHIFT (0)
274#define MPI_SAS_DEVICE_PGAD_H_HANDLE_MASK (0x0000FFFF)
275#define MPI_SAS_DEVICE_PGAD_H_HANDLE_SHIFT (0)
276
277#define MPI_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x00FF0000)
278#define MPI_SAS_PHY_PGAD_PHY_NUMBER_SHIFT (16)
279#define MPI_SAS_PHY_PGAD_DEVHANDLE_MASK (0x0000FFFF)
280#define MPI_SAS_PHY_PGAD_DEVHANDLE_SHIFT (0)
281
282
283/****************************************************************************
284* Config Request Message
285****************************************************************************/
286typedef struct _MSG_CONFIG
287{
288 U8 Action; /* 00h */
289 U8 Reserved; /* 01h */
290 U8 ChainOffset; /* 02h */
291 U8 Function; /* 03h */
292 U16 ExtPageLength; /* 04h */
293 U8 ExtPageType; /* 06h */
294 U8 MsgFlags; /* 07h */
295 U32 MsgContext; /* 08h */
296 U8 Reserved2[8]; /* 0Ch */
297 fCONFIG_PAGE_HEADER Header; /* 14h */
298 U32 PageAddress; /* 18h */
299 SGE_IO_UNION PageBufferSGE; /* 1Ch */
300} MSG_CONFIG, MPI_POINTER PTR_MSG_CONFIG,
301 Config_t, MPI_POINTER pConfig_t;
302
303
304/****************************************************************************
305* Action field values
306****************************************************************************/
307#define MPI_CONFIG_ACTION_PAGE_HEADER (0x00)
308#define MPI_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
309#define MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
310#define MPI_CONFIG_ACTION_PAGE_DEFAULT (0x03)
311#define MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
312#define MPI_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05)
313#define MPI_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
314
315
316/* Config Reply Message */
317typedef struct _MSG_CONFIG_REPLY
318{
319 U8 Action; /* 00h */
320 U8 Reserved; /* 01h */
321 U8 MsgLength; /* 02h */
322 U8 Function; /* 03h */
323 U16 ExtPageLength; /* 04h */
324 U8 ExtPageType; /* 06h */
325 U8 MsgFlags; /* 07h */
326 U32 MsgContext; /* 08h */
327 U8 Reserved2[2]; /* 0Ch */
328 U16 IOCStatus; /* 0Eh */
329 U32 IOCLogInfo; /* 10h */
330 fCONFIG_PAGE_HEADER Header; /* 14h */
331} MSG_CONFIG_REPLY, MPI_POINTER PTR_MSG_CONFIG_REPLY,
332 ConfigReply_t, MPI_POINTER pConfigReply_t;
333
334
335
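
/*
 * Usage sketch (illustrative, not part of the original header): config pages
 * are normally fetched in two steps -- MPI_CONFIG_ACTION_PAGE_HEADER first,
 * to learn the page header, then MPI_CONFIG_ACTION_PAGE_READ_CURRENT with a
 * buffer SGE sized from the returned Header.PageLength.  The helper below
 * only fills the request for the first step; message posting and SGE setup
 * are driver-specific and omitted, and the function name is hypothetical.
 * MPI_FUNCTION_CONFIG comes from mpi.h; <linux/string.h> supplies memset().
 */
static void mpi_prepare_page_header_request(MSG_CONFIG *req, U8 page_type,
                                            U8 page_number, U32 page_address,
                                            U32 msg_context)
{
        memset(req, 0, sizeof(*req));
        req->Action            = MPI_CONFIG_ACTION_PAGE_HEADER;
        req->Function          = MPI_FUNCTION_CONFIG;
        req->MsgContext        = msg_context;
        req->Header.PageType   = page_type;   /* e.g. MPI_CONFIG_PAGETYPE_MANUFACTURING */
        req->Header.PageNumber = page_number;
        req->PageAddress       = page_address;
}
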
336/*****************************************************************************
337*
338* C o n f i g u r a t i o n P a g e s
339*
340*****************************************************************************/
341
342/****************************************************************************
343* Manufacturing Config pages
344****************************************************************************/
345#define MPI_MANUFACTPAGE_VENDORID_LSILOGIC (0x1000)
346/* Fibre Channel */
347#define MPI_MANUFACTPAGE_DEVICEID_FC909 (0x0621)
348#define MPI_MANUFACTPAGE_DEVICEID_FC919 (0x0624)
349#define MPI_MANUFACTPAGE_DEVICEID_FC929 (0x0622)
350#define MPI_MANUFACTPAGE_DEVICEID_FC919X (0x0628)
351#define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626)
352/* SCSI */
353#define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030)
354#define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031)
355#define MPI_MANUFACTPAGE_DEVID_1030_53C1035 (0x0032)
356#define MPI_MANUFACTPAGE_DEVID_1030ZC_53C1035 (0x0033)
357#define MPI_MANUFACTPAGE_DEVID_53C1035 (0x0040)
358#define MPI_MANUFACTPAGE_DEVID_53C1035ZC (0x0041)
359/* SAS */
360#define MPI_MANUFACTPAGE_DEVID_SAS1064 (0x0050)
361
362
363typedef struct _CONFIG_PAGE_MANUFACTURING_0
364{
365 fCONFIG_PAGE_HEADER Header; /* 00h */
366 U8 ChipName[16]; /* 04h */
367 U8 ChipRevision[8]; /* 14h */
368 U8 BoardName[16]; /* 1Ch */
369 U8 BoardAssembly[16]; /* 2Ch */
370 U8 BoardTracerNumber[16]; /* 3Ch */
371
372} fCONFIG_PAGE_MANUFACTURING_0, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_0,
373 ManufacturingPage0_t, MPI_POINTER pManufacturingPage0_t;
374
375#define MPI_MANUFACTURING0_PAGEVERSION (0x00)
376
377
378typedef struct _CONFIG_PAGE_MANUFACTURING_1
379{
380 fCONFIG_PAGE_HEADER Header; /* 00h */
381 U8 VPD[256]; /* 04h */
382} fCONFIG_PAGE_MANUFACTURING_1, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_1,
383 ManufacturingPage1_t, MPI_POINTER pManufacturingPage1_t;
384
385#define MPI_MANUFACTURING1_PAGEVERSION (0x00)
386
387
388typedef struct _MPI_CHIP_REVISION_ID
389{
390 U16 DeviceID; /* 00h */
391 U8 PCIRevisionID; /* 02h */
392 U8 Reserved; /* 03h */
393} MPI_CHIP_REVISION_ID, MPI_POINTER PTR_MPI_CHIP_REVISION_ID,
394 MpiChipRevisionId_t, MPI_POINTER pMpiChipRevisionId_t;
395
396
397/*
398 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
399 * one and check Header.PageLength at runtime.
400 */
401#ifndef MPI_MAN_PAGE_2_HW_SETTINGS_WORDS
402#define MPI_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
403#endif
404
405typedef struct _CONFIG_PAGE_MANUFACTURING_2
406{
407 fCONFIG_PAGE_HEADER Header; /* 00h */
408 MPI_CHIP_REVISION_ID ChipId; /* 04h */
409 U32 HwSettings[MPI_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 08h */
410} fCONFIG_PAGE_MANUFACTURING_2, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_2,
411 ManufacturingPage2_t, MPI_POINTER pManufacturingPage2_t;
412
413#define MPI_MANUFACTURING2_PAGEVERSION (0x00)
414
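/*
 * Illustrative sketch (not part of the MPI headers): because pages such
 * as fCONFIG_PAGE_MANUFACTURING_2 are declared with a one-element array
 * placeholder, host code sizes its buffer from Header.PageLength
 * (reported in 32-bit words by the PAGE_HEADER action) rather than from
 * sizeof().  The kmalloc()/GFP_KERNEL usage assumes a Linux driver
 * context; the helper name is for the example only.
 */
#if 0
static fCONFIG_PAGE_MANUFACTURING_2 *
example_alloc_man_page_2(const fCONFIG_PAGE_HEADER *header)
{
    /* PageLength counts 32-bit words, so multiply by 4 for a byte count. */
    U32 length_in_bytes = (U32)header->PageLength * 4;

    return kmalloc(length_in_bytes, GFP_KERNEL);
}
#endif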
415
416/*
417 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
418 * one and check Header.PageLength at runtime.
419 */
420#ifndef MPI_MAN_PAGE_3_INFO_WORDS
421#define MPI_MAN_PAGE_3_INFO_WORDS (1)
422#endif
423
424typedef struct _CONFIG_PAGE_MANUFACTURING_3
425{
426 fCONFIG_PAGE_HEADER Header; /* 00h */
427 MPI_CHIP_REVISION_ID ChipId; /* 04h */
428 U32 Info[MPI_MAN_PAGE_3_INFO_WORDS];/* 08h */
429} fCONFIG_PAGE_MANUFACTURING_3, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_3,
430 ManufacturingPage3_t, MPI_POINTER pManufacturingPage3_t;
431
432#define MPI_MANUFACTURING3_PAGEVERSION (0x00)
433
434
435typedef struct _CONFIG_PAGE_MANUFACTURING_4
436{
437 fCONFIG_PAGE_HEADER Header; /* 00h */
438 U32 Reserved1; /* 04h */
439 U8 InfoOffset0; /* 08h */
440 U8 InfoSize0; /* 09h */
441 U8 InfoOffset1; /* 0Ah */
442 U8 InfoSize1; /* 0Bh */
443 U8 InquirySize; /* 0Ch */
444 U8 Flags; /* 0Dh */
445 U16 Reserved2; /* 0Eh */
446 U8 InquiryData[56]; /* 10h */
447 U32 ISVolumeSettings; /* 48h */
448 U32 IMEVolumeSettings; /* 4Ch */
449 U32 IMVolumeSettings; /* 50h */
450} fCONFIG_PAGE_MANUFACTURING_4, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_4,
451 ManufacturingPage4_t, MPI_POINTER pManufacturingPage4_t;
452
453#define MPI_MANUFACTURING4_PAGEVERSION (0x01)
454
455/* defines for the Flags field */
456#define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA (0x01)
457
458
459typedef struct _CONFIG_PAGE_MANUFACTURING_5
460{
461 fCONFIG_PAGE_HEADER Header; /* 00h */
462 U64 BaseWWID; /* 04h */
463} fCONFIG_PAGE_MANUFACTURING_5, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_5,
464 ManufacturingPage5_t, MPI_POINTER pManufacturingPage5_t;
465
466#define MPI_MANUFACTURING5_PAGEVERSION (0x00)
467
468
469typedef struct _CONFIG_PAGE_MANUFACTURING_6
470{
471 fCONFIG_PAGE_HEADER Header; /* 00h */
472 U32 ProductSpecificInfo;/* 04h */
473} fCONFIG_PAGE_MANUFACTURING_6, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_6,
474 ManufacturingPage6_t, MPI_POINTER pManufacturingPage6_t;
475
476#define MPI_MANUFACTURING6_PAGEVERSION (0x00)
477
478
479/****************************************************************************
480* IO Unit Config Pages
481****************************************************************************/
482
483typedef struct _CONFIG_PAGE_IO_UNIT_0
484{
485 fCONFIG_PAGE_HEADER Header; /* 00h */
486 U64 UniqueValue; /* 04h */
487} fCONFIG_PAGE_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_0,
488 IOUnitPage0_t, MPI_POINTER pIOUnitPage0_t;
489
490#define MPI_IOUNITPAGE0_PAGEVERSION (0x00)
491
492
493typedef struct _CONFIG_PAGE_IO_UNIT_1
494{
495 fCONFIG_PAGE_HEADER Header; /* 00h */
496 U32 Flags; /* 04h */
497} fCONFIG_PAGE_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_1,
498 IOUnitPage1_t, MPI_POINTER pIOUnitPage1_t;
499
500#define MPI_IOUNITPAGE1_PAGEVERSION (0x01)
501
502/* IO Unit Page 1 Flags defines */
503#define MPI_IOUNITPAGE1_MULTI_FUNCTION (0x00000000)
504#define MPI_IOUNITPAGE1_SINGLE_FUNCTION (0x00000001)
505#define MPI_IOUNITPAGE1_MULTI_PATHING (0x00000002)
506#define MPI_IOUNITPAGE1_SINGLE_PATHING (0x00000000)
507#define MPI_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
508#define MPI_IOUNITPAGE1_DISABLE_QUEUE_FULL_HANDLING (0x00000020)
509#define MPI_IOUNITPAGE1_DISABLE_IR (0x00000040)
510#define MPI_IOUNITPAGE1_FORCE_32 (0x00000080)
511#define MPI_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
512
513
514typedef struct _MPI_ADAPTER_INFO
515{
516 U8 PciBusNumber; /* 00h */
517 U8 PciDeviceAndFunctionNumber; /* 01h */
518 U16 AdapterFlags; /* 02h */
519} MPI_ADAPTER_INFO, MPI_POINTER PTR_MPI_ADAPTER_INFO,
520 MpiAdapterInfo_t, MPI_POINTER pMpiAdapterInfo_t;
521
522#define MPI_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
523#define MPI_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
524
525typedef struct _CONFIG_PAGE_IO_UNIT_2
526{
527 fCONFIG_PAGE_HEADER Header; /* 00h */
528 U32 Flags; /* 04h */
529 U32 BiosVersion; /* 08h */
530 MPI_ADAPTER_INFO AdapterOrder[4]; /* 0Ch */
531} fCONFIG_PAGE_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_2,
532 IOUnitPage2_t, MPI_POINTER pIOUnitPage2_t;
533
534#define MPI_IOUNITPAGE2_PAGEVERSION (0x00)
535
536#define MPI_IOUNITPAGE2_FLAGS_PAUSE_ON_ERROR (0x00000002)
537#define MPI_IOUNITPAGE2_FLAGS_VERBOSE_ENABLE (0x00000004)
538#define MPI_IOUNITPAGE2_FLAGS_COLOR_VIDEO_DISABLE (0x00000008)
539#define MPI_IOUNITPAGE2_FLAGS_DONT_HOOK_INT_40 (0x00000010)
540
541#define MPI_IOUNITPAGE2_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
542#define MPI_IOUNITPAGE2_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
543#define MPI_IOUNITPAGE2_FLAGS_ADAPTER_DISPLAY (0x00000020)
544#define MPI_IOUNITPAGE2_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
545
546
547/*
548 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
549 * one and check Header.PageLength at runtime.
550 */
551#ifndef MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX
552#define MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
553#endif
554
555typedef struct _CONFIG_PAGE_IO_UNIT_3
556{
557 fCONFIG_PAGE_HEADER Header; /* 00h */
558 U8 GPIOCount; /* 04h */
559 U8 Reserved1; /* 05h */
560 U16 Reserved2; /* 06h */
561 U16 GPIOVal[MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX]; /* 08h */
562} fCONFIG_PAGE_IO_UNIT_3, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_3,
563 IOUnitPage3_t, MPI_POINTER pIOUnitPage3_t;
564
565#define MPI_IOUNITPAGE3_PAGEVERSION (0x01)
566
567#define MPI_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFC)
568#define MPI_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2)
569#define MPI_IOUNITPAGE3_GPIO_SETTING_OFF (0x00)
570#define MPI_IOUNITPAGE3_GPIO_SETTING_ON (0x01)
571
572
573/****************************************************************************
574* IOC Config Pages
575****************************************************************************/
576
577typedef struct _CONFIG_PAGE_IOC_0
578{
579 fCONFIG_PAGE_HEADER Header; /* 00h */
580 U32 TotalNVStore; /* 04h */
581 U32 FreeNVStore; /* 08h */
582 U16 VendorID; /* 0Ch */
583 U16 DeviceID; /* 0Eh */
584 U8 RevisionID; /* 10h */
585 U8 Reserved[3]; /* 11h */
586 U32 ClassCode; /* 14h */
587 U16 SubsystemVendorID; /* 18h */
588 U16 SubsystemID; /* 1Ah */
589} fCONFIG_PAGE_IOC_0, MPI_POINTER PTR_CONFIG_PAGE_IOC_0,
590 IOCPage0_t, MPI_POINTER pIOCPage0_t;
591
592#define MPI_IOCPAGE0_PAGEVERSION (0x01)
593
594
595typedef struct _CONFIG_PAGE_IOC_1
596{
597 fCONFIG_PAGE_HEADER Header; /* 00h */
598 U32 Flags; /* 04h */
599 U32 CoalescingTimeout; /* 08h */
600 U8 CoalescingDepth; /* 0Ch */
601 U8 PCISlotNum; /* 0Dh */
602 U8 Reserved[2]; /* 0Eh */
603} fCONFIG_PAGE_IOC_1, MPI_POINTER PTR_CONFIG_PAGE_IOC_1,
604 IOCPage1_t, MPI_POINTER pIOCPage1_t;
605
606#define MPI_IOCPAGE1_PAGEVERSION (0x01)
607
608/* defines for the Flags field */
609#define MPI_IOCPAGE1_EEDP_HOST_SUPPORTS_DIF (0x08000000)
610#define MPI_IOCPAGE1_EEDP_MODE_MASK (0x07000000)
611#define MPI_IOCPAGE1_EEDP_MODE_OFF (0x00000000)
612#define MPI_IOCPAGE1_EEDP_MODE_T10 (0x01000000)
613#define MPI_IOCPAGE1_EEDP_MODE_LSI_1 (0x02000000)
614#define MPI_IOCPAGE1_REPLY_COALESCING (0x00000001)
615
616#define MPI_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF)
617
618
619typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
620{
621 U8 VolumeID; /* 00h */
622 U8 VolumeBus; /* 01h */
623 U8 VolumeIOC; /* 02h */
624 U8 VolumePageNumber; /* 03h */
625 U8 VolumeType; /* 04h */
626 U8 Flags; /* 05h */
627 U16 Reserved3; /* 06h */
628} fCONFIG_PAGE_IOC_2_RAID_VOL, MPI_POINTER PTR_CONFIG_PAGE_IOC_2_RAID_VOL,
629 ConfigPageIoc2RaidVol_t, MPI_POINTER pConfigPageIoc2RaidVol_t;
630
631/* IOC Page 2 Volume RAID Type values, also used in RAID Volume pages */
632
633#define MPI_RAID_VOL_TYPE_IS (0x00)
634#define MPI_RAID_VOL_TYPE_IME (0x01)
635#define MPI_RAID_VOL_TYPE_IM (0x02)
636
637/* IOC Page 2 Volume Flags values */
638
639#define MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE (0x08)
640
641/*
642 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
643 * one and check Header.PageLength at runtime.
644 */
645#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX
646#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX (1)
647#endif
648
649typedef struct _CONFIG_PAGE_IOC_2
650{
651 fCONFIG_PAGE_HEADER Header; /* 00h */
652 U32 CapabilitiesFlags; /* 04h */
653 U8 NumActiveVolumes; /* 08h */
654 U8 MaxVolumes; /* 09h */
655 U8 NumActivePhysDisks; /* 0Ah */
656 U8 MaxPhysDisks; /* 0Bh */
657 fCONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */
658} fCONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
659 IOCPage2_t, MPI_POINTER pIOCPage2_t;
660
661#define MPI_IOCPAGE2_PAGEVERSION (0x02)
662
663/* IOC Page 2 Capabilities flags */
664
665#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001)
666#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002)
667#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004)
668#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000)
669#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000)
670#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000)
671
672
673typedef struct _IOC_3_PHYS_DISK
674{
675 U8 PhysDiskID; /* 00h */
676 U8 PhysDiskBus; /* 01h */
677 U8 PhysDiskIOC; /* 02h */
678 U8 PhysDiskNum; /* 03h */
679} IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK,
680 Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t;
681
682/*
683 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
684 * one and check Header.PageLength at runtime.
685 */
686#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX
687#define MPI_IOC_PAGE_3_PHYSDISK_MAX (1)
688#endif
689
690typedef struct _CONFIG_PAGE_IOC_3
691{
692 fCONFIG_PAGE_HEADER Header; /* 00h */
693 U8 NumPhysDisks; /* 04h */
694 U8 Reserved1; /* 05h */
695 U16 Reserved2; /* 06h */
696 IOC_3_PHYS_DISK PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */
697} fCONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3,
698 IOCPage3_t, MPI_POINTER pIOCPage3_t;
699
700#define MPI_IOCPAGE3_PAGEVERSION (0x00)
701
702
703typedef struct _IOC_4_SEP
704{
705 U8 SEPTargetID; /* 00h */
706 U8 SEPBus; /* 01h */
707 U16 Reserved; /* 02h */
708} IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP,
709 Ioc4Sep_t, MPI_POINTER pIoc4Sep_t;
710
711/*
712 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
713 * one and check Header.PageLength at runtime.
714 */
715#ifndef MPI_IOC_PAGE_4_SEP_MAX
716#define MPI_IOC_PAGE_4_SEP_MAX (1)
717#endif
718
719typedef struct _CONFIG_PAGE_IOC_4
720{
721 fCONFIG_PAGE_HEADER Header; /* 00h */
722 U8 ActiveSEP; /* 04h */
723 U8 MaxSEP; /* 05h */
724 U16 Reserved1; /* 06h */
725 IOC_4_SEP SEP[MPI_IOC_PAGE_4_SEP_MAX]; /* 08h */
726} fCONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4,
727 IOCPage4_t, MPI_POINTER pIOCPage4_t;
728
729#define MPI_IOCPAGE4_PAGEVERSION (0x00)
730
731
732typedef struct _IOC_5_HOT_SPARE
733{
734 U8 PhysDiskNum; /* 00h */
735 U8 Reserved; /* 01h */
736 U8 HotSparePool; /* 02h */
737 U8 Flags; /* 03h */
738} IOC_5_HOT_SPARE, MPI_POINTER PTR_IOC_5_HOT_SPARE,
739 Ioc5HotSpare_t, MPI_POINTER pIoc5HotSpare_t;
740
741/* IOC Page 5 HotSpare Flags */
742#define MPI_IOC_PAGE_5_HOT_SPARE_ACTIVE (0x01)
743
744/*
745 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
746 * one and check Header.PageLength at runtime.
747 */
748#ifndef MPI_IOC_PAGE_5_HOT_SPARE_MAX
749#define MPI_IOC_PAGE_5_HOT_SPARE_MAX (1)
750#endif
751
752typedef struct _CONFIG_PAGE_IOC_5
753{
754 fCONFIG_PAGE_HEADER Header; /* 00h */
755 U32 Reserved1; /* 04h */
756 U8 NumHotSpares; /* 08h */
757 U8 Reserved2; /* 09h */
758 U16 Reserved3; /* 0Ah */
759 IOC_5_HOT_SPARE HotSpare[MPI_IOC_PAGE_5_HOT_SPARE_MAX]; /* 0Ch */
760} fCONFIG_PAGE_IOC_5, MPI_POINTER PTR_CONFIG_PAGE_IOC_5,
761 IOCPage5_t, MPI_POINTER pIOCPage5_t;
762
763#define MPI_IOCPAGE5_PAGEVERSION (0x00)
764
765
766/****************************************************************************
767* BIOS Port Config Pages
768****************************************************************************/
769
770typedef struct _CONFIG_PAGE_BIOS_1
771{
772 fCONFIG_PAGE_HEADER Header; /* 00h */
773 U32 BiosOptions; /* 04h */
774 U32 IOCSettings; /* 08h */
775 U32 Reserved1; /* 0Ch */
776 U32 DeviceSettings; /* 10h */
777 U16 NumberOfDevices; /* 14h */
778 U16 Reserved2; /* 16h */
779 U16 IOTimeoutBlockDevicesNonRM; /* 18h */
780 U16 IOTimeoutSequential; /* 1Ah */
781 U16 IOTimeoutOther; /* 1Ch */
782 U16 IOTimeoutBlockDevicesRM; /* 1Eh */
783} fCONFIG_PAGE_BIOS_1, MPI_POINTER PTR_CONFIG_PAGE_BIOS_1,
784 BIOSPage1_t, MPI_POINTER pBIOSPage1_t;
785
786#define MPI_BIOSPAGE1_PAGEVERSION (0x00)
787
788/* values for the BiosOptions field */
789#define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE (0x00000400)
790#define MPI_BIOSPAGE1_OPTIONS_FC_ENABLE (0x00000200)
791#define MPI_BIOSPAGE1_OPTIONS_SAS_ENABLE (0x00000100)
792#define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
793
794/* values for the IOCSettings field */
795#define MPI_BIOSPAGE1_IOCSET_MASK_SPINUP_DELAY (0x00000F00)
796#define MPI_BIOSPAGE1_IOCSET_SHIFT_SPINUP_DELAY (8)
797
798#define MPI_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0)
799#define MPI_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000)
800#define MPI_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040)
801#define MPI_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080)
802
803#define MPI_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030)
804#define MPI_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000)
805#define MPI_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010)
806#define MPI_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020)
807#define MPI_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030)
808
809#define MPI_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
810
811/* values for the DeviceSettings field */
812#define MPI_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
813#define MPI_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
814#define MPI_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
815#define MPI_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
816
817
818/****************************************************************************
819* SCSI Port Config Pages
820****************************************************************************/
821
822typedef struct _CONFIG_PAGE_SCSI_PORT_0
823{
824 fCONFIG_PAGE_HEADER Header; /* 00h */
825 U32 Capabilities; /* 04h */
826 U32 PhysicalInterface; /* 08h */
827} fCONFIG_PAGE_SCSI_PORT_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_0,
828 SCSIPortPage0_t, MPI_POINTER pSCSIPortPage0_t;
829
830#define MPI_SCSIPORTPAGE0_PAGEVERSION (0x01)
831
832#define MPI_SCSIPORTPAGE0_CAP_IU (0x00000001)
833#define MPI_SCSIPORTPAGE0_CAP_DT (0x00000002)
834#define MPI_SCSIPORTPAGE0_CAP_QAS (0x00000004)
835#define MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK (0x0000FF00)
836#define MPI_SCSIPORTPAGE0_SYNC_ASYNC (0x00)
837#define MPI_SCSIPORTPAGE0_SYNC_5 (0x32)
838#define MPI_SCSIPORTPAGE0_SYNC_10 (0x19)
839#define MPI_SCSIPORTPAGE0_SYNC_20 (0x0C)
840#define MPI_SCSIPORTPAGE0_SYNC_33_33 (0x0B)
841#define MPI_SCSIPORTPAGE0_SYNC_40 (0x0A)
842#define MPI_SCSIPORTPAGE0_SYNC_80 (0x09)
843#define MPI_SCSIPORTPAGE0_SYNC_160 (0x08)
844#define MPI_SCSIPORTPAGE0_SYNC_UNKNOWN (0xFF)
845
846#define MPI_SCSIPORTPAGE0_CAP_SHIFT_MIN_SYNC_PERIOD (8)
847#define MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(Cap) \
848 ( ((Cap) & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK) \
849 >> MPI_SCSIPORTPAGE0_CAP_SHIFT_MIN_SYNC_PERIOD \
850 )
851#define MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK (0x00FF0000)
852#define MPI_SCSIPORTPAGE0_CAP_SHIFT_MAX_SYNC_OFFSET (16)
853#define MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(Cap) \
854 ( ((Cap) & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK) \
855 >> MPI_SCSIPORTPAGE0_CAP_SHIFT_MAX_SYNC_OFFSET \
856 )
857#define MPI_SCSIPORTPAGE0_CAP_WIDE (0x20000000)
858#define MPI_SCSIPORTPAGE0_CAP_AIP (0x80000000)
859
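/*
 * Illustrative sketch (not part of the MPI headers): decoding the
 * Capabilities word of SCSI Port Page 0 with the macros above.  A
 * minimum synchronous period factor of MPI_SCSIPORTPAGE0_SYNC_160
 * together with the DT and IU capability bits indicates an
 * Ultra320-capable port; the function name is an assumption for the
 * example only.
 */
#if 0
static int
example_port_supports_u320(U32 capabilities)
{
    return (MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(capabilities) ==
                MPI_SCSIPORTPAGE0_SYNC_160) &&
           (capabilities & MPI_SCSIPORTPAGE0_CAP_DT) &&
           (capabilities & MPI_SCSIPORTPAGE0_CAP_IU);
}
#endif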
860#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK (0x00000003)
861#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD (0x01)
862#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE (0x02)
863#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_LVD (0x03)
864#define MPI_SCSIPORTPAGE0_PHY_MASK_CONNECTED_ID (0xFF000000)
865#define MPI_SCSIPORTPAGE0_PHY_SHIFT_CONNECTED_ID (24)
866#define MPI_SCSIPORTPAGE0_PHY_BUS_FREE_CONNECTED_ID (0xFE)
867#define MPI_SCSIPORTPAGE0_PHY_UNKNOWN_CONNECTED_ID (0xFF)
868
869
870typedef struct _CONFIG_PAGE_SCSI_PORT_1
871{
872 fCONFIG_PAGE_HEADER Header; /* 00h */
873 U32 Configuration; /* 04h */
874 U32 OnBusTimerValue; /* 08h */
875 U8 TargetConfig; /* 0Ch */
876 U8 Reserved1; /* 0Dh */
877 U16 IDConfig; /* 0Eh */
878} fCONFIG_PAGE_SCSI_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_1,
879 SCSIPortPage1_t, MPI_POINTER pSCSIPortPage1_t;
880
881#define MPI_SCSIPORTPAGE1_PAGEVERSION (0x03)
882
883/* Configuration values */
884#define MPI_SCSIPORTPAGE1_CFG_PORT_SCSI_ID_MASK (0x000000FF)
885#define MPI_SCSIPORTPAGE1_CFG_PORT_RESPONSE_ID_MASK (0xFFFF0000)
886#define MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID (16)
887
888/* TargetConfig values */
889#define MPI_SCSIPORTPAGE1_TARGCONFIG_TARG_ONLY (0x01)
890#define MPI_SCSIPORTPAGE1_TARGCONFIG_INIT_TARG (0x02)
891
892
893typedef struct _MPI_DEVICE_INFO
894{
895 U8 Timeout; /* 00h */
896 U8 SyncFactor; /* 01h */
897 U16 DeviceFlags; /* 02h */
898} MPI_DEVICE_INFO, MPI_POINTER PTR_MPI_DEVICE_INFO,
899 MpiDeviceInfo_t, MPI_POINTER pMpiDeviceInfo_t;
900
901typedef struct _CONFIG_PAGE_SCSI_PORT_2
902{
903 fCONFIG_PAGE_HEADER Header; /* 00h */
904 U32 PortFlags; /* 04h */
905 U32 PortSettings; /* 08h */
906 MPI_DEVICE_INFO DeviceSettings[16]; /* 0Ch */
907} fCONFIG_PAGE_SCSI_PORT_2, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_2,
908 SCSIPortPage2_t, MPI_POINTER pSCSIPortPage2_t;
909
910#define MPI_SCSIPORTPAGE2_PAGEVERSION (0x02)
911
912/* PortFlags values */
913#define MPI_SCSIPORTPAGE2_PORT_FLAGS_SCAN_HIGH_TO_LOW (0x00000001)
914#define MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET (0x00000004)
915#define MPI_SCSIPORTPAGE2_PORT_FLAGS_ALTERNATE_CHS (0x00000008)
916#define MPI_SCSIPORTPAGE2_PORT_FLAGS_TERMINATION_DISABLE (0x00000010)
917
918#define MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK (0x00000060)
919#define MPI_SCSIPORTPAGE2_PORT_FLAGS_FULL_DV (0x00000000)
920#define MPI_SCSIPORTPAGE2_PORT_FLAGS_BASIC_DV_ONLY (0x00000020)
921#define MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV (0x00000060)
922
923
924/* PortSettings values */
925#define MPI_SCSIPORTPAGE2_PORT_HOST_ID_MASK (0x0000000F)
926#define MPI_SCSIPORTPAGE2_PORT_MASK_INIT_HBA (0x00000030)
927#define MPI_SCSIPORTPAGE2_PORT_DISABLE_INIT_HBA (0x00000000)
928#define MPI_SCSIPORTPAGE2_PORT_BIOS_INIT_HBA (0x00000010)
929#define MPI_SCSIPORTPAGE2_PORT_OS_INIT_HBA (0x00000020)
930#define MPI_SCSIPORTPAGE2_PORT_BIOS_OS_INIT_HBA (0x00000030)
931#define MPI_SCSIPORTPAGE2_PORT_REMOVABLE_MEDIA (0x000000C0)
932#define MPI_SCSIPORTPAGE2_PORT_RM_NONE (0x00000000)
933#define MPI_SCSIPORTPAGE2_PORT_RM_BOOT_ONLY (0x00000040)
934#define MPI_SCSIPORTPAGE2_PORT_RM_WITH_MEDIA (0x00000080)
935#define MPI_SCSIPORTPAGE2_PORT_SPINUP_DELAY_MASK (0x00000F00)
936#define MPI_SCSIPORTPAGE2_PORT_SHIFT_SPINUP_DELAY (8)
937#define MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS (0x00003000)
938#define MPI_SCSIPORTPAGE2_PORT_NEGO_MASTER_SETTINGS (0x00000000)
939#define MPI_SCSIPORTPAGE2_PORT_NONE_MASTER_SETTINGS (0x00001000)
940#define MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS (0x00003000)
941
942#define MPI_SCSIPORTPAGE2_DEVICE_DISCONNECT_ENABLE (0x0001)
943#define MPI_SCSIPORTPAGE2_DEVICE_ID_SCAN_ENABLE (0x0002)
944#define MPI_SCSIPORTPAGE2_DEVICE_LUN_SCAN_ENABLE (0x0004)
945#define MPI_SCSIPORTPAGE2_DEVICE_TAG_QUEUE_ENABLE (0x0008)
946#define MPI_SCSIPORTPAGE2_DEVICE_WIDE_DISABLE (0x0010)
947#define MPI_SCSIPORTPAGE2_DEVICE_BOOT_CHOICE (0x0020)
948
949
950/****************************************************************************
951* SCSI Target Device Config Pages
952****************************************************************************/
953
954typedef struct _CONFIG_PAGE_SCSI_DEVICE_0
955{
956 fCONFIG_PAGE_HEADER Header; /* 00h */
957 U32 NegotiatedParameters; /* 04h */
958 U32 Information; /* 08h */
959} fCONFIG_PAGE_SCSI_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_0,
960 SCSIDevicePage0_t, MPI_POINTER pSCSIDevicePage0_t;
961
962#define MPI_SCSIDEVPAGE0_PAGEVERSION (0x03)
963
964#define MPI_SCSIDEVPAGE0_NP_IU (0x00000001)
965#define MPI_SCSIDEVPAGE0_NP_DT (0x00000002)
966#define MPI_SCSIDEVPAGE0_NP_QAS (0x00000004)
967#define MPI_SCSIDEVPAGE0_NP_HOLD_MCS (0x00000008)
968#define MPI_SCSIDEVPAGE0_NP_WR_FLOW (0x00000010)
969#define MPI_SCSIDEVPAGE0_NP_RD_STRM (0x00000020)
970#define MPI_SCSIDEVPAGE0_NP_RTI (0x00000040)
971#define MPI_SCSIDEVPAGE0_NP_PCOMP_EN (0x00000080)
972#define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK (0x0000FF00)
973#define MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD (8)
974#define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK (0x00FF0000)
975#define MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET (16)
976#define MPI_SCSIDEVPAGE0_NP_WIDE (0x20000000)
977#define MPI_SCSIDEVPAGE0_NP_AIP (0x80000000)
978
979#define MPI_SCSIDEVPAGE0_INFO_PARAMS_NEGOTIATED (0x00000001)
980#define MPI_SCSIDEVPAGE0_INFO_SDTR_REJECTED (0x00000002)
981#define MPI_SCSIDEVPAGE0_INFO_WDTR_REJECTED (0x00000004)
982#define MPI_SCSIDEVPAGE0_INFO_PPR_REJECTED (0x00000008)
983
984
985typedef struct _CONFIG_PAGE_SCSI_DEVICE_1
986{
987 fCONFIG_PAGE_HEADER Header; /* 00h */
988 U32 RequestedParameters; /* 04h */
989 U32 Reserved; /* 08h */
990 U32 Configuration; /* 0Ch */
991} fCONFIG_PAGE_SCSI_DEVICE_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_1,
992 SCSIDevicePage1_t, MPI_POINTER pSCSIDevicePage1_t;
993
994#define MPI_SCSIDEVPAGE1_PAGEVERSION (0x04)
995
996#define MPI_SCSIDEVPAGE1_RP_IU (0x00000001)
997#define MPI_SCSIDEVPAGE1_RP_DT (0x00000002)
998#define MPI_SCSIDEVPAGE1_RP_QAS (0x00000004)
999#define MPI_SCSIDEVPAGE1_RP_HOLD_MCS (0x00000008)
1000#define MPI_SCSIDEVPAGE1_RP_WR_FLOW (0x00000010)
1001#define MPI_SCSIDEVPAGE1_RP_RD_STRM (0x00000020)
1002#define MPI_SCSIDEVPAGE1_RP_RTI (0x00000040)
1003#define MPI_SCSIDEVPAGE1_RP_PCOMP_EN (0x00000080)
1004#define MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK (0x0000FF00)
1005#define MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD (8)
1006#define MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK (0x00FF0000)
1007#define MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET (16)
1008#define MPI_SCSIDEVPAGE1_RP_WIDE (0x20000000)
1009#define MPI_SCSIDEVPAGE1_RP_AIP (0x80000000)
1010
1011#define MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED (0x00000002)
1012#define MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED (0x00000004)
1013#define MPI_SCSIDEVPAGE1_CONF_EXTENDED_PARAMS_ENABLE (0x00000008)
1014#define MPI_SCSIDEVPAGE1_CONF_FORCE_PPR_MSG (0x00000010)
1015
1016
1017typedef struct _CONFIG_PAGE_SCSI_DEVICE_2
1018{
1019 fCONFIG_PAGE_HEADER Header; /* 00h */
1020 U32 DomainValidation; /* 04h */
1021 U32 ParityPipeSelect; /* 08h */
1022 U32 DataPipeSelect; /* 0Ch */
1023} fCONFIG_PAGE_SCSI_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_2,
1024 SCSIDevicePage2_t, MPI_POINTER pSCSIDevicePage2_t;
1025
1026#define MPI_SCSIDEVPAGE2_PAGEVERSION (0x01)
1027
1028#define MPI_SCSIDEVPAGE2_DV_ISI_ENABLE (0x00000010)
1029#define MPI_SCSIDEVPAGE2_DV_SECONDARY_DRIVER_ENABLE (0x00000020)
1030#define MPI_SCSIDEVPAGE2_DV_SLEW_RATE_CTRL (0x00000380)
1031#define MPI_SCSIDEVPAGE2_DV_PRIM_DRIVE_STR_CTRL (0x00001C00)
1032#define MPI_SCSIDEVPAGE2_DV_SECOND_DRIVE_STR_CTRL (0x0000E000)
1033#define MPI_SCSIDEVPAGE2_DV_XCLKH_ST (0x10000000)
1034#define MPI_SCSIDEVPAGE2_DV_XCLKS_ST (0x20000000)
1035#define MPI_SCSIDEVPAGE2_DV_XCLKH_DT (0x40000000)
1036#define MPI_SCSIDEVPAGE2_DV_XCLKS_DT (0x80000000)
1037
1038#define MPI_SCSIDEVPAGE2_PPS_PPS_MASK (0x00000003)
1039
1040#define MPI_SCSIDEVPAGE2_DPS_BIT_0_PL_SELECT_MASK (0x00000003)
1041#define MPI_SCSIDEVPAGE2_DPS_BIT_1_PL_SELECT_MASK (0x0000000C)
1042#define MPI_SCSIDEVPAGE2_DPS_BIT_2_PL_SELECT_MASK (0x00000030)
1043#define MPI_SCSIDEVPAGE2_DPS_BIT_3_PL_SELECT_MASK (0x000000C0)
1044#define MPI_SCSIDEVPAGE2_DPS_BIT_4_PL_SELECT_MASK (0x00000300)
1045#define MPI_SCSIDEVPAGE2_DPS_BIT_5_PL_SELECT_MASK (0x00000C00)
1046#define MPI_SCSIDEVPAGE2_DPS_BIT_6_PL_SELECT_MASK (0x00003000)
1047#define MPI_SCSIDEVPAGE2_DPS_BIT_7_PL_SELECT_MASK (0x0000C000)
1048#define MPI_SCSIDEVPAGE2_DPS_BIT_8_PL_SELECT_MASK (0x00030000)
1049#define MPI_SCSIDEVPAGE2_DPS_BIT_9_PL_SELECT_MASK (0x000C0000)
1050#define MPI_SCSIDEVPAGE2_DPS_BIT_10_PL_SELECT_MASK (0x00300000)
1051#define MPI_SCSIDEVPAGE2_DPS_BIT_11_PL_SELECT_MASK (0x00C00000)
1052#define MPI_SCSIDEVPAGE2_DPS_BIT_12_PL_SELECT_MASK (0x03000000)
1053#define MPI_SCSIDEVPAGE2_DPS_BIT_13_PL_SELECT_MASK (0x0C000000)
1054#define MPI_SCSIDEVPAGE2_DPS_BIT_14_PL_SELECT_MASK (0x30000000)
1055#define MPI_SCSIDEVPAGE2_DPS_BIT_15_PL_SELECT_MASK (0xC0000000)
1056
1057
1058typedef struct _CONFIG_PAGE_SCSI_DEVICE_3
1059{
1060 fCONFIG_PAGE_HEADER Header; /* 00h */
1061 U16 MsgRejectCount; /* 04h */
1062 U16 PhaseErrorCount; /* 06h */
1063 U16 ParityErrorCount; /* 08h */
1064 U16 Reserved; /* 0Ah */
1065} fCONFIG_PAGE_SCSI_DEVICE_3, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_3,
1066 SCSIDevicePage3_t, MPI_POINTER pSCSIDevicePage3_t;
1067
1068#define MPI_SCSIDEVPAGE3_PAGEVERSION (0x00)
1069
1070#define MPI_SCSIDEVPAGE3_MAX_COUNTER (0xFFFE)
1071#define MPI_SCSIDEVPAGE3_UNSUPPORTED_COUNTER (0xFFFF)
1072
1073
1074/****************************************************************************
1075* FC Port Config Pages
1076****************************************************************************/
1077
1078typedef struct _CONFIG_PAGE_FC_PORT_0
1079{
1080 fCONFIG_PAGE_HEADER Header; /* 00h */
1081 U32 Flags; /* 04h */
1082 U8 MPIPortNumber; /* 08h */
1083 U8 LinkType; /* 09h */
1084 U8 PortState; /* 0Ah */
1085 U8 Reserved; /* 0Bh */
1086 U32 PortIdentifier; /* 0Ch */
1087 U64 WWNN; /* 10h */
1088 U64 WWPN; /* 18h */
1089 U32 SupportedServiceClass; /* 20h */
1090 U32 SupportedSpeeds; /* 24h */
1091 U32 CurrentSpeed; /* 28h */
1092 U32 MaxFrameSize; /* 2Ch */
1093 U64 FabricWWNN; /* 30h */
1094 U64 FabricWWPN; /* 38h */
1095 U32 DiscoveredPortsCount; /* 40h */
1096 U32 MaxInitiators; /* 44h */
1097 U8 MaxAliasesSupported; /* 48h */
1098 U8 MaxHardAliasesSupported; /* 49h */
1099 U8 NumCurrentAliases; /* 4Ah */
1100 U8 Reserved1; /* 4Bh */
1101} fCONFIG_PAGE_FC_PORT_0, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_0,
1102 FCPortPage0_t, MPI_POINTER pFCPortPage0_t;
1103
1104#define MPI_FCPORTPAGE0_PAGEVERSION (0x02)
1105
1106#define MPI_FCPORTPAGE0_FLAGS_PROT_MASK (0x0000000F)
1107#define MPI_FCPORTPAGE0_FLAGS_PROT_FCP_INIT (MPI_PORTFACTS_PROTOCOL_INITIATOR)
1108#define MPI_FCPORTPAGE0_FLAGS_PROT_FCP_TARG (MPI_PORTFACTS_PROTOCOL_TARGET)
1109#define MPI_FCPORTPAGE0_FLAGS_PROT_LAN (MPI_PORTFACTS_PROTOCOL_LAN)
1110#define MPI_FCPORTPAGE0_FLAGS_PROT_LOGBUSADDR (MPI_PORTFACTS_PROTOCOL_LOGBUSADDR)
1111
1112#define MPI_FCPORTPAGE0_FLAGS_ALIAS_ALPA_SUPPORTED (0x00000010)
1113#define MPI_FCPORTPAGE0_FLAGS_ALIAS_WWN_SUPPORTED (0x00000020)
1114#define MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID (0x00000040)
1115
1116#define MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK (0x00000F00)
1117#define MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT (0x00000000)
1118#define MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT (0x00000100)
1119#define MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP (0x00000200)
1120#define MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT (0x00000400)
1121#define MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP (0x00000800)
1122
1123#define MPI_FCPORTPAGE0_LTYPE_RESERVED (0x00)
1124#define MPI_FCPORTPAGE0_LTYPE_OTHER (0x01)
1125#define MPI_FCPORTPAGE0_LTYPE_UNKNOWN (0x02)
1126#define MPI_FCPORTPAGE0_LTYPE_COPPER (0x03)
1127#define MPI_FCPORTPAGE0_LTYPE_SINGLE_1300 (0x04)
1128#define MPI_FCPORTPAGE0_LTYPE_SINGLE_1500 (0x05)
1129#define MPI_FCPORTPAGE0_LTYPE_50_LASER_MULTI (0x06)
1130#define MPI_FCPORTPAGE0_LTYPE_50_LED_MULTI (0x07)
1131#define MPI_FCPORTPAGE0_LTYPE_62_LASER_MULTI (0x08)
1132#define MPI_FCPORTPAGE0_LTYPE_62_LED_MULTI (0x09)
1133#define MPI_FCPORTPAGE0_LTYPE_MULTI_LONG_WAVE (0x0A)
1134#define MPI_FCPORTPAGE0_LTYPE_MULTI_SHORT_WAVE (0x0B)
1135#define MPI_FCPORTPAGE0_LTYPE_LASER_SHORT_WAVE (0x0C)
1136#define MPI_FCPORTPAGE0_LTYPE_LED_SHORT_WAVE (0x0D)
1137#define MPI_FCPORTPAGE0_LTYPE_1300_LONG_WAVE (0x0E)
1138#define MPI_FCPORTPAGE0_LTYPE_1500_LONG_WAVE (0x0F)
1139
1140#define MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN (0x01) /*(SNIA)HBA_PORTSTATE_UNKNOWN 1 Unknown */
1141#define MPI_FCPORTPAGE0_PORTSTATE_ONLINE (0x02) /*(SNIA)HBA_PORTSTATE_ONLINE 2 Operational */
1142#define MPI_FCPORTPAGE0_PORTSTATE_OFFLINE (0x03) /*(SNIA)HBA_PORTSTATE_OFFLINE 3 User Offline */
1143#define MPI_FCPORTPAGE0_PORTSTATE_BYPASSED (0x04) /*(SNIA)HBA_PORTSTATE_BYPASSED 4 Bypassed */
1144#define MPI_FCPORTPAGE0_PORTSTATE_DIAGNOST (0x05) /*(SNIA)HBA_PORTSTATE_DIAGNOSTICS 5 In diagnostics mode */
1145#define MPI_FCPORTPAGE0_PORTSTATE_LINKDOWN (0x06) /*(SNIA)HBA_PORTSTATE_LINKDOWN 6 Link Down */
1146#define MPI_FCPORTPAGE0_PORTSTATE_ERROR (0x07) /*(SNIA)HBA_PORTSTATE_ERROR 7 Port Error */
1147#define MPI_FCPORTPAGE0_PORTSTATE_LOOPBACK (0x08) /*(SNIA)HBA_PORTSTATE_LOOPBACK 8 Loopback */
1148
1149#define MPI_FCPORTPAGE0_SUPPORT_CLASS_1 (0x00000001)
1150#define MPI_FCPORTPAGE0_SUPPORT_CLASS_2 (0x00000002)
1151#define MPI_FCPORTPAGE0_SUPPORT_CLASS_3 (0x00000004)
1152
1153#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0 Unknown - transceiver incapable of reporting */
1154#define MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED (0x00000001) /* (SNIA)HBA_PORTSPEED_1GBIT 1 1 GBit/sec */
1155#define MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED (0x00000002) /* (SNIA)HBA_PORTSPEED_2GBIT 2 2 GBit/sec */
1156#define MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED (0x00000004) /* (SNIA)HBA_PORTSPEED_10GBIT 4 10 GBit/sec */
1157#define MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED (0x00000008) /* (SNIA)HBA_PORTSPEED_4GBIT 8 4 GBit/sec */
1158
1159#define MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN
1160#define MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED
1161#define MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED
1162#define MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED
1163#define MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED
1164#define MPI_FCPORTPAGE0_CURRENT_SPEED_NOT_NEGOTIATED (0x00008000) /* (SNIA)HBA_PORTSPEED_NOT_NEGOTIATED (1<<15) Speed not established */
1165
1166
1167
1168typedef struct _CONFIG_PAGE_FC_PORT_1
1169{
1170 fCONFIG_PAGE_HEADER Header; /* 00h */
1171 U32 Flags; /* 04h */
1172 U64 NoSEEPROMWWNN; /* 08h */
1173 U64 NoSEEPROMWWPN; /* 10h */
1174 U8 HardALPA; /* 18h */
1175 U8 LinkConfig; /* 19h */
1176 U8 TopologyConfig; /* 1Ah */
1177 U8 AltConnector; /* 1Bh */
1178 U8 NumRequestedAliases; /* 1Ch */
1179 U8 RR_TOV; /* 1Dh */
1180 U8 InitiatorDeviceTimeout; /* 1Eh */
1181 U8 InitiatorIoPendTimeout; /* 1Fh */
1182} fCONFIG_PAGE_FC_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_1,
1183 FCPortPage1_t, MPI_POINTER pFCPortPage1_t;
1184
1185#define MPI_FCPORTPAGE1_PAGEVERSION (0x06)
1186
1187#define MPI_FCPORTPAGE1_FLAGS_EXT_FCP_STATUS_EN (0x08000000)
1188#define MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY (0x04000000)
1189#define MPI_FCPORTPAGE1_FLAGS_FORCE_USE_NOSEEPROM_WWNS (0x02000000)
1190#define MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS (0x01000000)
1191#define MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID (0x00800000)
1192#define MPI_FCPORTPAGE1_FLAGS_PORT_OFFLINE (0x00400000)
1193#define MPI_FCPORTPAGE1_FLAGS_SOFT_ALPA_FALLBACK (0x00200000)
1194#define MPI_FCPORTPAGE1_FLAGS_MASK_RR_TOV_UNITS (0x00000070)
1195#define MPI_FCPORTPAGE1_FLAGS_SUPPRESS_PROT_REG (0x00000008)
1196#define MPI_FCPORTPAGE1_FLAGS_PLOGI_ON_LOGO (0x00000004)
1197#define MPI_FCPORTPAGE1_FLAGS_MAINTAIN_LOGINS (0x00000002)
1198#define MPI_FCPORTPAGE1_FLAGS_SORT_BY_DID (0x00000001)
1199#define MPI_FCPORTPAGE1_FLAGS_SORT_BY_WWN (0x00000000)
1200
1201#define MPI_FCPORTPAGE1_FLAGS_PROT_MASK (0xF0000000)
1202#define MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT (28)
1203#define MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT ((U32)MPI_PORTFACTS_PROTOCOL_INITIATOR << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
1204#define MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG ((U32)MPI_PORTFACTS_PROTOCOL_TARGET << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
1205#define MPI_FCPORTPAGE1_FLAGS_PROT_LAN ((U32)MPI_PORTFACTS_PROTOCOL_LAN << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
1206#define MPI_FCPORTPAGE1_FLAGS_PROT_LOGBUSADDR ((U32)MPI_PORTFACTS_PROTOCOL_LOGBUSADDR << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
1207
1208#define MPI_FCPORTPAGE1_FLAGS_NONE_RR_TOV_UNITS (0x00000000)
1209#define MPI_FCPORTPAGE1_FLAGS_THOUSANDTH_RR_TOV_UNITS (0x00000010)
1210#define MPI_FCPORTPAGE1_FLAGS_TENTH_RR_TOV_UNITS (0x00000030)
1211#define MPI_FCPORTPAGE1_FLAGS_TEN_RR_TOV_UNITS (0x00000050)
1212
1213#define MPI_FCPORTPAGE1_HARD_ALPA_NOT_USED (0xFF)
1214
1215#define MPI_FCPORTPAGE1_LCONFIG_SPEED_MASK (0x0F)
1216#define MPI_FCPORTPAGE1_LCONFIG_SPEED_1GIG (0x00)
1217#define MPI_FCPORTPAGE1_LCONFIG_SPEED_2GIG (0x01)
1218#define MPI_FCPORTPAGE1_LCONFIG_SPEED_4GIG (0x02)
1219#define MPI_FCPORTPAGE1_LCONFIG_SPEED_10GIG (0x03)
1220#define MPI_FCPORTPAGE1_LCONFIG_SPEED_AUTO (0x0F)
1221
1222#define MPI_FCPORTPAGE1_TOPOLOGY_MASK (0x0F)
1223#define MPI_FCPORTPAGE1_TOPOLOGY_NLPORT (0x01)
1224#define MPI_FCPORTPAGE1_TOPOLOGY_NPORT (0x02)
1225#define MPI_FCPORTPAGE1_TOPOLOGY_AUTO (0x0F)
1226
1227#define MPI_FCPORTPAGE1_ALT_CONN_UNKNOWN (0x00)
1228
1229#define MPI_FCPORTPAGE1_INITIATOR_DEV_TIMEOUT_MASK (0x7F)
1230
1231
1232typedef struct _CONFIG_PAGE_FC_PORT_2
1233{
1234 fCONFIG_PAGE_HEADER Header; /* 00h */
1235 U8 NumberActive; /* 04h */
1236 U8 ALPA[127]; /* 05h */
1237} fCONFIG_PAGE_FC_PORT_2, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_2,
1238 FCPortPage2_t, MPI_POINTER pFCPortPage2_t;
1239
1240#define MPI_FCPORTPAGE2_PAGEVERSION (0x01)
1241
1242
1243typedef struct _WWN_FORMAT
1244{
1245 U64 WWNN; /* 00h */
1246 U64 WWPN; /* 08h */
1247} WWN_FORMAT, MPI_POINTER PTR_WWN_FORMAT,
1248 WWNFormat, MPI_POINTER pWWNFormat;
1249
1250typedef union _FC_PORT_PERSISTENT_PHYSICAL_ID
1251{
1252 WWN_FORMAT WWN;
1253 U32 Did;
1254} FC_PORT_PERSISTENT_PHYSICAL_ID, MPI_POINTER PTR_FC_PORT_PERSISTENT_PHYSICAL_ID,
1255 PersistentPhysicalId_t, MPI_POINTER pPersistentPhysicalId_t;
1256
1257typedef struct _FC_PORT_PERSISTENT
1258{
1259 FC_PORT_PERSISTENT_PHYSICAL_ID PhysicalIdentifier; /* 00h */
1260 U8 TargetID; /* 10h */
1261 U8 Bus; /* 11h */
1262 U16 Flags; /* 12h */
1263} FC_PORT_PERSISTENT, MPI_POINTER PTR_FC_PORT_PERSISTENT,
1264 PersistentData_t, MPI_POINTER pPersistentData_t;
1265
1266#define MPI_PERSISTENT_FLAGS_SHIFT (16)
1267#define MPI_PERSISTENT_FLAGS_ENTRY_VALID (0x0001)
1268#define MPI_PERSISTENT_FLAGS_SCAN_ID (0x0002)
1269#define MPI_PERSISTENT_FLAGS_SCAN_LUNS (0x0004)
1270#define MPI_PERSISTENT_FLAGS_BOOT_DEVICE (0x0008)
1271#define MPI_PERSISTENT_FLAGS_BY_DID (0x0080)
1272
1273/*
1274 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1275 * one and check Header.PageLength at runtime.
1276 */
1277#ifndef MPI_FC_PORT_PAGE_3_ENTRY_MAX
1278#define MPI_FC_PORT_PAGE_3_ENTRY_MAX (1)
1279#endif
1280
1281typedef struct _CONFIG_PAGE_FC_PORT_3
1282{
1283 fCONFIG_PAGE_HEADER Header; /* 00h */
1284 FC_PORT_PERSISTENT Entry[MPI_FC_PORT_PAGE_3_ENTRY_MAX]; /* 04h */
1285} fCONFIG_PAGE_FC_PORT_3, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_3,
1286 FCPortPage3_t, MPI_POINTER pFCPortPage3_t;
1287
1288#define MPI_FCPORTPAGE3_PAGEVERSION (0x01)
1289
1290
1291typedef struct _CONFIG_PAGE_FC_PORT_4
1292{
1293 fCONFIG_PAGE_HEADER Header; /* 00h */
1294 U32 PortFlags; /* 04h */
1295 U32 PortSettings; /* 08h */
1296} fCONFIG_PAGE_FC_PORT_4, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_4,
1297 FCPortPage4_t, MPI_POINTER pFCPortPage4_t;
1298
1299#define MPI_FCPORTPAGE4_PAGEVERSION (0x00)
1300
1301#define MPI_FCPORTPAGE4_PORT_FLAGS_ALTERNATE_CHS (0x00000008)
1302
1303#define MPI_FCPORTPAGE4_PORT_MASK_INIT_HBA (0x00000030)
1304#define MPI_FCPORTPAGE4_PORT_DISABLE_INIT_HBA (0x00000000)
1305#define MPI_FCPORTPAGE4_PORT_BIOS_INIT_HBA (0x00000010)
1306#define MPI_FCPORTPAGE4_PORT_OS_INIT_HBA (0x00000020)
1307#define MPI_FCPORTPAGE4_PORT_BIOS_OS_INIT_HBA (0x00000030)
1308#define MPI_FCPORTPAGE4_PORT_REMOVABLE_MEDIA (0x000000C0)
1309#define MPI_FCPORTPAGE4_PORT_SPINUP_DELAY_MASK (0x00000F00)
1310
1311
1312typedef struct _CONFIG_PAGE_FC_PORT_5_ALIAS_INFO
1313{
1314 U8 Flags; /* 00h */
1315 U8 AliasAlpa; /* 01h */
1316 U16 Reserved; /* 02h */
1317 U64 AliasWWNN; /* 04h */
1318 U64 AliasWWPN; /* 0Ch */
1319} fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO,
1320 MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5_ALIAS_INFO,
1321 FcPortPage5AliasInfo_t, MPI_POINTER pFcPortPage5AliasInfo_t;
1322
1323typedef struct _CONFIG_PAGE_FC_PORT_5
1324{
1325 fCONFIG_PAGE_HEADER Header; /* 00h */
1326 fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO AliasInfo; /* 04h */
1327} fCONFIG_PAGE_FC_PORT_5, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5,
1328 FCPortPage5_t, MPI_POINTER pFCPortPage5_t;
1329
1330#define MPI_FCPORTPAGE5_PAGEVERSION (0x02)
1331
1332#define MPI_FCPORTPAGE5_FLAGS_ALPA_ACQUIRED (0x01)
1333#define MPI_FCPORTPAGE5_FLAGS_HARD_ALPA (0x02)
1334#define MPI_FCPORTPAGE5_FLAGS_HARD_WWNN (0x04)
1335#define MPI_FCPORTPAGE5_FLAGS_HARD_WWPN (0x08)
1336#define MPI_FCPORTPAGE5_FLAGS_DISABLE (0x10)
1337
1338typedef struct _CONFIG_PAGE_FC_PORT_6
1339{
1340 fCONFIG_PAGE_HEADER Header; /* 00h */
1341 U32 Reserved; /* 04h */
1342 U64 TimeSinceReset; /* 08h */
1343 U64 TxFrames; /* 10h */
1344 U64 RxFrames; /* 18h */
1345 U64 TxWords; /* 20h */
1346 U64 RxWords; /* 28h */
1347 U64 LipCount; /* 30h */
1348 U64 NosCount; /* 38h */
1349 U64 ErrorFrames; /* 40h */
1350 U64 DumpedFrames; /* 48h */
1351 U64 LinkFailureCount; /* 50h */
1352 U64 LossOfSyncCount; /* 58h */
1353 U64 LossOfSignalCount; /* 60h */
1354 U64 PrimativeSeqErrCount; /* 68h */
1355 U64 InvalidTxWordCount; /* 70h */
1356 U64 InvalidCrcCount; /* 78h */
1357 U64 FcpInitiatorIoCount; /* 80h */
1358} fCONFIG_PAGE_FC_PORT_6, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_6,
1359 FCPortPage6_t, MPI_POINTER pFCPortPage6_t;
1360
1361#define MPI_FCPORTPAGE6_PAGEVERSION (0x00)
1362
1363
1364typedef struct _CONFIG_PAGE_FC_PORT_7
1365{
1366 fCONFIG_PAGE_HEADER Header; /* 00h */
1367 U32 Reserved; /* 04h */
1368 U8 PortSymbolicName[256]; /* 08h */
1369} fCONFIG_PAGE_FC_PORT_7, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_7,
1370 FCPortPage7_t, MPI_POINTER pFCPortPage7_t;
1371
1372#define MPI_FCPORTPAGE7_PAGEVERSION (0x00)
1373
1374
1375typedef struct _CONFIG_PAGE_FC_PORT_8
1376{
1377 fCONFIG_PAGE_HEADER Header; /* 00h */
1378 U32 BitVector[8]; /* 04h */
1379} fCONFIG_PAGE_FC_PORT_8, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_8,
1380 FCPortPage8_t, MPI_POINTER pFCPortPage8_t;
1381
1382#define MPI_FCPORTPAGE8_PAGEVERSION (0x00)
1383
1384
1385typedef struct _CONFIG_PAGE_FC_PORT_9
1386{
1387 fCONFIG_PAGE_HEADER Header; /* 00h */
1388 U32 Reserved; /* 04h */
1389 U64 GlobalWWPN; /* 08h */
1390 U64 GlobalWWNN; /* 10h */
1391 U32 UnitType; /* 18h */
1392 U32 PhysicalPortNumber; /* 1Ch */
1393 U32 NumAttachedNodes; /* 20h */
1394 U16 IPVersion; /* 24h */
1395 U16 UDPPortNumber; /* 26h */
1396 U8 IPAddress[16]; /* 28h */
1397 U16 Reserved1; /* 38h */
1398 U16 TopologyDiscoveryFlags; /* 3Ah */
1399} fCONFIG_PAGE_FC_PORT_9, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_9,
1400 FCPortPage9_t, MPI_POINTER pFCPortPage9_t;
1401
1402#define MPI_FCPORTPAGE9_PAGEVERSION (0x00)
1403
1404
1405typedef struct _CONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA
1406{
1407 U8 Id; /* 10h */
1408 U8 ExtId; /* 11h */
1409 U8 Connector; /* 12h */
1410 U8 Transceiver[8]; /* 13h */
1411 U8 Encoding; /* 1Bh */
1412 U8 BitRate_100mbs; /* 1Ch */
1413 U8 Reserved1; /* 1Dh */
1414 U8 Length9u_km; /* 1Eh */
1415 U8 Length9u_100m; /* 1Fh */
1416 U8 Length50u_10m; /* 20h */
1417 U8 Length62p5u_10m; /* 21h */
1418 U8 LengthCopper_m; /* 22h */
1419 U8 Reserved2; /* 23h */
1420 U8 VendorName[16]; /* 24h */
1421 U8 Reserved3; /* 34h */
1422 U8 VendorOUI[3]; /* 35h */
1423 U8 VendorPN[16]; /* 38h */
1424 U8 VendorRev[4]; /* 48h */
1425 U16 Reserved4; /* 4Ch */
1426 U8 Reserved5; /* 4Eh */
1427 U8 CC_BASE; /* 4Fh */
1428} fCONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA,
1429 MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA,
1430 FCPortPage10BaseSfpData_t, MPI_POINTER pFCPortPage10BaseSfpData_t;
1431
1432#define MPI_FCPORT10_BASE_ID_UNKNOWN (0x00)
1433#define MPI_FCPORT10_BASE_ID_GBIC (0x01)
1434#define MPI_FCPORT10_BASE_ID_FIXED (0x02)
1435#define MPI_FCPORT10_BASE_ID_SFP (0x03)
1436#define MPI_FCPORT10_BASE_ID_SFP_MIN (0x04)
1437#define MPI_FCPORT10_BASE_ID_SFP_MAX (0x7F)
1438#define MPI_FCPORT10_BASE_ID_VEND_SPEC_MASK (0x80)
1439
1440#define MPI_FCPORT10_BASE_EXTID_UNKNOWN (0x00)
1441#define MPI_FCPORT10_BASE_EXTID_MODDEF1 (0x01)
1442#define MPI_FCPORT10_BASE_EXTID_MODDEF2 (0x02)
1443#define MPI_FCPORT10_BASE_EXTID_MODDEF3 (0x03)
1444#define MPI_FCPORT10_BASE_EXTID_SEEPROM (0x04)
1445#define MPI_FCPORT10_BASE_EXTID_MODDEF5 (0x05)
1446#define MPI_FCPORT10_BASE_EXTID_MODDEF6 (0x06)
1447#define MPI_FCPORT10_BASE_EXTID_MODDEF7 (0x07)
1448#define MPI_FCPORT10_BASE_EXTID_VNDSPC_MASK (0x80)
1449
1450#define MPI_FCPORT10_BASE_CONN_UNKNOWN (0x00)
1451#define MPI_FCPORT10_BASE_CONN_SC (0x01)
1452#define MPI_FCPORT10_BASE_CONN_COPPER1 (0x02)
1453#define MPI_FCPORT10_BASE_CONN_COPPER2 (0x03)
1454#define MPI_FCPORT10_BASE_CONN_BNC_TNC (0x04)
1455#define MPI_FCPORT10_BASE_CONN_COAXIAL (0x05)
1456#define MPI_FCPORT10_BASE_CONN_FIBERJACK (0x06)
1457#define MPI_FCPORT10_BASE_CONN_LC (0x07)
1458#define MPI_FCPORT10_BASE_CONN_MT_RJ (0x08)
1459#define MPI_FCPORT10_BASE_CONN_MU (0x09)
1460#define MPI_FCPORT10_BASE_CONN_SG (0x0A)
1461#define MPI_FCPORT10_BASE_CONN_OPT_PIGT (0x0B)
1462#define MPI_FCPORT10_BASE_CONN_RSV1_MIN (0x0C)
1463#define MPI_FCPORT10_BASE_CONN_RSV1_MAX (0x1F)
1464#define MPI_FCPORT10_BASE_CONN_HSSDC_II (0x20)
1465#define MPI_FCPORT10_BASE_CONN_CPR_PIGT (0x21)
1466#define MPI_FCPORT10_BASE_CONN_RSV2_MIN (0x22)
1467#define MPI_FCPORT10_BASE_CONN_RSV2_MAX (0x7F)
1468#define MPI_FCPORT10_BASE_CONN_VNDSPC_MASK (0x80)
1469
1470#define MPI_FCPORT10_BASE_ENCODE_UNSPEC (0x00)
1471#define MPI_FCPORT10_BASE_ENCODE_8B10B (0x01)
1472#define MPI_FCPORT10_BASE_ENCODE_4B5B (0x02)
1473#define MPI_FCPORT10_BASE_ENCODE_NRZ (0x03)
1474#define MPI_FCPORT10_BASE_ENCODE_MANCHESTER (0x04)
1475
1476
1477typedef struct _CONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA
1478{
1479 U8 Options[2]; /* 50h */
1480 U8 BitRateMax; /* 52h */
1481 U8 BitRateMin; /* 53h */
1482 U8 VendorSN[16]; /* 54h */
1483 U8 DateCode[8]; /* 64h */
1484 U8 Reserved5[3]; /* 6Ch */
1485 U8 CC_EXT; /* 6Fh */
1486} fCONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA,
1487 MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA,
1488 FCPortPage10ExtendedSfpData_t, MPI_POINTER pFCPortPage10ExtendedSfpData_t;
1489
1490#define MPI_FCPORT10_EXT_OPTION1_RATESEL (0x20)
1491#define MPI_FCPORT10_EXT_OPTION1_TX_DISABLE (0x10)
1492#define MPI_FCPORT10_EXT_OPTION1_TX_FAULT (0x08)
1493#define MPI_FCPORT10_EXT_OPTION1_LOS_INVERT (0x04)
1494#define MPI_FCPORT10_EXT_OPTION1_LOS (0x02)
1495
1496
1497typedef struct _CONFIG_PAGE_FC_PORT_10
1498{
1499 fCONFIG_PAGE_HEADER Header; /* 00h */
1500 U8 Flags; /* 04h */
1501 U8 Reserved1; /* 05h */
1502 U16 Reserved2; /* 06h */
1503 U32 HwConfig1; /* 08h */
1504 U32 HwConfig2; /* 0Ch */
1505 fCONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA Base; /* 10h */
1506 fCONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA Extended; /* 50h */
1507 U8 VendorSpecific[32]; /* 70h */
1508} fCONFIG_PAGE_FC_PORT_10, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10,
1509 FCPortPage10_t, MPI_POINTER pFCPortPage10_t;
1510
1511#define MPI_FCPORTPAGE10_PAGEVERSION (0x00)
1512
1513/* standard MODDEF pin definitions (from GBIC spec.) */
1514#define MPI_FCPORTPAGE10_FLAGS_MODDEF_MASK (0x00000007)
1515#define MPI_FCPORTPAGE10_FLAGS_MODDEF2 (0x00000001)
1516#define MPI_FCPORTPAGE10_FLAGS_MODDEF1 (0x00000002)
1517#define MPI_FCPORTPAGE10_FLAGS_MODDEF0 (0x00000004)
1518#define MPI_FCPORTPAGE10_FLAGS_MODDEF_NOGBIC (0x00000007)
1519#define MPI_FCPORTPAGE10_FLAGS_MODDEF_CPR_IEEE_CX (0x00000006)
1520#define MPI_FCPORTPAGE10_FLAGS_MODDEF_COPPER (0x00000005)
1521#define MPI_FCPORTPAGE10_FLAGS_MODDEF_OPTICAL_LW (0x00000004)
1522#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SEEPROM (0x00000003)
1523#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SW_OPTICAL (0x00000002)
1524#define MPI_FCPORTPAGE10_FLAGS_MODDEF_LX_IEEE_OPT_LW (0x00000001)
1525#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SX_IEEE_OPT_SW (0x00000000)
1526
1527#define MPI_FCPORTPAGE10_FLAGS_CC_BASE_OK (0x00000010)
1528#define MPI_FCPORTPAGE10_FLAGS_CC_EXT_OK (0x00000020)
1529
1530
1531/****************************************************************************
1532* FC Device Config Pages
1533****************************************************************************/
1534
1535typedef struct _CONFIG_PAGE_FC_DEVICE_0
1536{
1537 fCONFIG_PAGE_HEADER Header; /* 00h */
1538 U64 WWNN; /* 04h */
1539 U64 WWPN; /* 0Ch */
1540 U32 PortIdentifier; /* 14h */
1541 U8 Protocol; /* 18h */
1542 U8 Flags; /* 19h */
1543 U16 BBCredit; /* 1Ah */
1544 U16 MaxRxFrameSize; /* 1Ch */
1545 U8 ADISCHardALPA; /* 1Eh */
1546 U8 PortNumber; /* 1Fh */
1547 U8 FcPhLowestVersion; /* 20h */
1548 U8 FcPhHighestVersion; /* 21h */
1549 U8 CurrentTargetID; /* 22h */
1550 U8 CurrentBus; /* 23h */
1551} fCONFIG_PAGE_FC_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_FC_DEVICE_0,
1552 FCDevicePage0_t, MPI_POINTER pFCDevicePage0_t;
1553
1554#define MPI_FC_DEVICE_PAGE0_PAGEVERSION (0x03)
1555
1556#define MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID (0x01)
1557#define MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID (0x02)
1558#define MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID (0x04)
1559
1560#define MPI_FC_DEVICE_PAGE0_PROT_IP (0x01)
1561#define MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET (0x02)
1562#define MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR (0x04)
1563#define MPI_FC_DEVICE_PAGE0_PROT_FCP_RETRY (0x08)
1564
1565#define MPI_FC_DEVICE_PAGE0_PGAD_PORT_MASK (MPI_FC_DEVICE_PGAD_PORT_MASK)
1566#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_MASK (MPI_FC_DEVICE_PGAD_FORM_MASK)
1567#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_NEXT_DID (MPI_FC_DEVICE_PGAD_FORM_NEXT_DID)
1568#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_BUS_TID (MPI_FC_DEVICE_PGAD_FORM_BUS_TID)
1569#define MPI_FC_DEVICE_PAGE0_PGAD_DID_MASK (MPI_FC_DEVICE_PGAD_ND_DID_MASK)
1570#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_MASK (MPI_FC_DEVICE_PGAD_BT_BUS_MASK)
1571#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_SHIFT (MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT)
1572#define MPI_FC_DEVICE_PAGE0_PGAD_TID_MASK (MPI_FC_DEVICE_PGAD_BT_TID_MASK)
1573
1574#define MPI_FC_DEVICE_PAGE0_HARD_ALPA_UNKNOWN (0xFF)
1575
1576/****************************************************************************
1577* RAID Volume Config Pages
1578****************************************************************************/
1579
1580typedef struct _RAID_VOL0_PHYS_DISK
1581{
1582 U16 Reserved; /* 00h */
1583 U8 PhysDiskMap; /* 02h */
1584 U8 PhysDiskNum; /* 03h */
1585} RAID_VOL0_PHYS_DISK, MPI_POINTER PTR_RAID_VOL0_PHYS_DISK,
1586 RaidVol0PhysDisk_t, MPI_POINTER pRaidVol0PhysDisk_t;
1587
1588#define MPI_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
1589#define MPI_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
1590
1591typedef struct _RAID_VOL0_STATUS
1592{
1593 U8 Flags; /* 00h */
1594 U8 State; /* 01h */
1595 U16 Reserved; /* 02h */
1596} RAID_VOL0_STATUS, MPI_POINTER PTR_RAID_VOL0_STATUS,
1597 RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t;
1598
1599/* RAID Volume Page 0 VolumeStatus defines */
1600
1601#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01)
1602#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02)
1603#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04)
1604#define MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x08)
1605
1606#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00)
1607#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01)
1608#define MPI_RAIDVOL0_STATUS_STATE_FAILED (0x02)
1609
1610typedef struct _RAID_VOL0_SETTINGS
1611{
1612 U16 Settings; /* 00h */
1613 U8 HotSparePool; /* 02h */ /* MPI_RAID_HOT_SPARE_POOL_ */
1614 U8 Reserved; /* 03h */
1615} RAID_VOL0_SETTINGS, MPI_POINTER PTR_RAID_VOL0_SETTINGS,
1616 RaidVol0Settings, MPI_POINTER pRaidVol0Settings;
1617
1618/* RAID Volume Page 0 VolumeSettings defines */
1619
1620#define MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE (0x0001)
1621#define MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART (0x0002)
1622#define MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE (0x0004)
1623#define MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC (0x0008)
1624#define MPI_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0010)
1625#define MPI_RAIDVOL0_SETTING_USE_DEFAULTS (0x8000)
1626
1627/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
1628#define MPI_RAID_HOT_SPARE_POOL_0 (0x01)
1629#define MPI_RAID_HOT_SPARE_POOL_1 (0x02)
1630#define MPI_RAID_HOT_SPARE_POOL_2 (0x04)
1631#define MPI_RAID_HOT_SPARE_POOL_3 (0x08)
1632#define MPI_RAID_HOT_SPARE_POOL_4 (0x10)
1633#define MPI_RAID_HOT_SPARE_POOL_5 (0x20)
1634#define MPI_RAID_HOT_SPARE_POOL_6 (0x40)
1635#define MPI_RAID_HOT_SPARE_POOL_7 (0x80)
1636
1637/*
1638 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1639 * one and check Header.PageLength at runtime.
1640 */
1641#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX
1642#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
1643#endif
1644
1645typedef struct _CONFIG_PAGE_RAID_VOL_0
1646{
1647 fCONFIG_PAGE_HEADER Header; /* 00h */
1648 U8 VolumeID; /* 04h */
1649 U8 VolumeBus; /* 05h */
1650 U8 VolumeIOC; /* 06h */
1651 U8 VolumeType; /* 07h */ /* MPI_RAID_VOL_TYPE_ */
1652 RAID_VOL0_STATUS VolumeStatus; /* 08h */
1653 RAID_VOL0_SETTINGS VolumeSettings; /* 0Ch */
1654 U32 MaxLBA; /* 10h */
1655 U32 Reserved1; /* 14h */
1656 U32 StripeSize; /* 18h */
1657 U32 Reserved2; /* 1Ch */
1658 U32 Reserved3; /* 20h */
1659 U8 NumPhysDisks; /* 24h */
1660 U8 Reserved4; /* 25h */
1661 U16 Reserved5; /* 26h */
1662 RAID_VOL0_PHYS_DISK PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */
1663} fCONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
1664 RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
1665
1666#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x01)
1667
1668
1669/****************************************************************************
1670* RAID Physical Disk Config Pages
1671****************************************************************************/
1672
1673typedef struct _RAID_PHYS_DISK0_ERROR_DATA
1674{
1675 U8 ErrorCdbByte; /* 00h */
1676 U8 ErrorSenseKey; /* 01h */
1677 U16 Reserved; /* 02h */
1678 U16 ErrorCount; /* 04h */
1679 U8 ErrorASC; /* 06h */
1680 U8 ErrorASCQ; /* 07h */
1681 U16 SmartCount; /* 08h */
1682 U8 SmartASC; /* 0Ah */
1683 U8 SmartASCQ; /* 0Bh */
1684} RAID_PHYS_DISK0_ERROR_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_ERROR_DATA,
1685 RaidPhysDisk0ErrorData_t, MPI_POINTER pRaidPhysDisk0ErrorData_t;
1686
1687typedef struct _RAID_PHYS_DISK_INQUIRY_DATA
1688{
1689 U8 VendorID[8]; /* 00h */
1690 U8 ProductID[16]; /* 08h */
1691 U8 ProductRevLevel[4]; /* 18h */
1692 U8 Info[32]; /* 1Ch */
1693} RAID_PHYS_DISK0_INQUIRY_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_INQUIRY_DATA,
1694 RaidPhysDisk0InquiryData, MPI_POINTER pRaidPhysDisk0InquiryData;
1695
1696typedef struct _RAID_PHYS_DISK0_SETTINGS
1697{
1698 U8 SepID; /* 00h */
1699 U8 SepBus; /* 01h */
1700 U8 HotSparePool; /* 02h */ /* MPI_RAID_HOT_SPARE_POOL_ */
1701 U8 PhysDiskSettings; /* 03h */
1702} RAID_PHYS_DISK0_SETTINGS, MPI_POINTER PTR_RAID_PHYS_DISK0_SETTINGS,
1703 RaidPhysDiskSettings_t, MPI_POINTER pRaidPhysDiskSettings_t;
1704
1705typedef struct _RAID_PHYS_DISK0_STATUS
1706{
1707 U8 Flags; /* 00h */
1708 U8 State; /* 01h */
1709 U16 Reserved; /* 02h */
1710} RAID_PHYS_DISK0_STATUS, MPI_POINTER PTR_RAID_PHYS_DISK0_STATUS,
1711 RaidPhysDiskStatus_t, MPI_POINTER pRaidPhysDiskStatus_t;
1712
1713/* RAID Physical Disk Page 0 PhysDiskStatus Flags and State values */
1714
1715#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01)
1716#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02)
1717
1718#define MPI_PHYSDISK0_STATUS_ONLINE (0x00)
1719#define MPI_PHYSDISK0_STATUS_MISSING (0x01)
1720#define MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE (0x02)
1721#define MPI_PHYSDISK0_STATUS_FAILED (0x03)
1722#define MPI_PHYSDISK0_STATUS_INITIALIZING (0x04)
1723#define MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED (0x05)
1724#define MPI_PHYSDISK0_STATUS_FAILED_REQUESTED (0x06)
1725#define MPI_PHYSDISK0_STATUS_OTHER_OFFLINE (0xFF)
1726
1727typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_0
1728{
1729 fCONFIG_PAGE_HEADER Header; /* 00h */
1730 U8 PhysDiskID; /* 04h */
1731 U8 PhysDiskBus; /* 05h */
1732 U8 PhysDiskIOC; /* 06h */
1733 U8 PhysDiskNum; /* 07h */
1734 RAID_PHYS_DISK0_SETTINGS PhysDiskSettings; /* 08h */
1735 U32 Reserved1; /* 0Ch */
1736 U32 Reserved2; /* 10h */
1737 U32 Reserved3; /* 14h */
1738 U8 DiskIdentifier[16]; /* 18h */
1739 RAID_PHYS_DISK0_INQUIRY_DATA InquiryData; /* 28h */
1740 RAID_PHYS_DISK0_STATUS PhysDiskStatus; /* 64h */
1741 U32 MaxLBA; /* 68h */
1742 RAID_PHYS_DISK0_ERROR_DATA ErrorData; /* 6Ch */
1743} fCONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0,
1744 RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t;
1745
1746#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x00)
1747
1748
1749/****************************************************************************
1750* LAN Config Pages
1751****************************************************************************/
1752
1753typedef struct _CONFIG_PAGE_LAN_0
1754{
1755 ConfigPageHeader_t Header; /* 00h */
1756 U16 TxRxModes; /* 04h */
1757 U16 Reserved; /* 06h */
1758 U32 PacketPrePad; /* 08h */
1759} fCONFIG_PAGE_LAN_0, MPI_POINTER PTR_CONFIG_PAGE_LAN_0,
1760 LANPage0_t, MPI_POINTER pLANPage0_t;
1761
1762#define MPI_LAN_PAGE0_PAGEVERSION (0x01)
1763
1764#define MPI_LAN_PAGE0_RETURN_LOOPBACK (0x0000)
1765#define MPI_LAN_PAGE0_SUPPRESS_LOOPBACK (0x0001)
1766#define MPI_LAN_PAGE0_LOOPBACK_MASK (0x0001)
1767
1768typedef struct _CONFIG_PAGE_LAN_1
1769{
1770 ConfigPageHeader_t Header; /* 00h */
1771 U16 Reserved; /* 04h */
1772 U8 CurrentDeviceState; /* 06h */
1773 U8 Reserved1; /* 07h */
1774 U32 MinPacketSize; /* 08h */
1775 U32 MaxPacketSize; /* 0Ch */
1776 U32 HardwareAddressLow; /* 10h */
1777 U32 HardwareAddressHigh; /* 14h */
1778 U32 MaxWireSpeedLow; /* 18h */
1779 U32 MaxWireSpeedHigh; /* 1Ch */
1780 U32 BucketsRemaining; /* 20h */
1781 U32 MaxReplySize; /* 24h */
1782 U32 NegWireSpeedLow; /* 28h */
1783 U32 NegWireSpeedHigh; /* 2Ch */
1784} fCONFIG_PAGE_LAN_1, MPI_POINTER PTR_CONFIG_PAGE_LAN_1,
1785 LANPage1_t, MPI_POINTER pLANPage1_t;
1786
1787#define MPI_LAN_PAGE1_PAGEVERSION (0x03)
1788
1789#define MPI_LAN_PAGE1_DEV_STATE_RESET (0x00)
1790#define MPI_LAN_PAGE1_DEV_STATE_OPERATIONAL (0x01)
1791
1792
1793/****************************************************************************
1794* Inband Config Pages
1795****************************************************************************/
1796
1797typedef struct _CONFIG_PAGE_INBAND_0
1798{
1799 fCONFIG_PAGE_HEADER Header; /* 00h */
1800 MPI_VERSION_FORMAT InbandVersion; /* 04h */
1801 U16 MaximumBuffers; /* 08h */
1802 U16 Reserved1; /* 0Ah */
1803} fCONFIG_PAGE_INBAND_0, MPI_POINTER PTR_CONFIG_PAGE_INBAND_0,
1804 InbandPage0_t, MPI_POINTER pInbandPage0_t;
1805
1806#define MPI_INBAND_PAGEVERSION (0x00)
1807
1808
1809
1810/****************************************************************************
1811* SAS IO Unit Config Pages
1812****************************************************************************/
1813
1814typedef struct _MPI_SAS_IO_UNIT0_PHY_DATA
1815{
1816 U8 Port; /* 00h */
1817 U8 PortFlags; /* 01h */
1818 U8 PhyFlags; /* 02h */
1819 U8 NegotiatedLinkRate; /* 03h */
1820 U32 ControllerPhyDeviceInfo;/* 04h */
1821 U16 AttachedDeviceHandle; /* 08h */
1822 U16 ControllerDevHandle; /* 0Ah */
1823 U32 Reserved2; /* 0Ch */
1824} MPI_SAS_IO_UNIT0_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT0_PHY_DATA,
1825 SasIOUnit0PhyData, MPI_POINTER pSasIOUnit0PhyData;
1826
1827/*
1828 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1829 * one and check Header.PageLength at runtime.
1830 */
1831#ifndef MPI_SAS_IOUNIT0_PHY_MAX
1832#define MPI_SAS_IOUNIT0_PHY_MAX (1)
1833#endif
1834
1835typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
1836{
1837 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
1838 U32 Reserved1; /* 08h */
1839 U8 NumPhys; /* 0Ch */
1840 U8 Reserved2; /* 0Dh */
1841 U16 Reserved3; /* 0Eh */
1842 MPI_SAS_IO_UNIT0_PHY_DATA PhyData[MPI_SAS_IOUNIT0_PHY_MAX]; /* 10h */
1843} fCONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
1844 SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
1845
1846#define MPI_SASIOUNITPAGE0_PAGEVERSION (0x00)
1847
1848/* values for SAS IO Unit Page 0 PortFlags */
1849#define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS (0x08)
1850#define MPI_SAS_IOUNIT0_PORT_FLAGS_0_TARGET_IOC_NUM (0x00)
1851#define MPI_SAS_IOUNIT0_PORT_FLAGS_1_TARGET_IOC_NUM (0x04)
1852#define MPI_SAS_IOUNIT0_PORT_FLAGS_WAIT_FOR_PORTENABLE (0x02)
1853#define MPI_SAS_IOUNIT0_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
1854
1855/* values for SAS IO Unit Page 0 PhyFlags */
1856#define MPI_SAS_IOUNIT0_PHY_FLAGS_PHY_DISABLED (0x04)
1857#define MPI_SAS_IOUNIT0_PHY_FLAGS_TX_INVERT (0x02)
1858#define MPI_SAS_IOUNIT0_PHY_FLAGS_RX_INVERT (0x01)
1859
1860/* values for SAS IO Unit Page 0 NegotiatedLinkRate */
1861#define MPI_SAS_IOUNIT0_RATE_UNKNOWN (0x00)
1862#define MPI_SAS_IOUNIT0_RATE_PHY_DISABLED (0x01)
1863#define MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION (0x02)
1864#define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03)
1865#define MPI_SAS_IOUNIT0_RATE_1_5 (0x08)
1866#define MPI_SAS_IOUNIT0_RATE_3_0 (0x09)
1867
1868/* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
1869
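The note above MPI_SAS_IOUNIT0_PHY_MAX means that PhyData[] is declared with a single element only as a placeholder: a page read from the IOC carries NumPhys entries, so the receive buffer has to be sized from the page header rather than from sizeof(). A minimal sketch of walking such a buffer, added here purely for illustration (it is not part of the original header; the helper name is hypothetical and the mpi*.h headers of this commit are assumed to be included):

/* Illustrative sketch only.  Walks a SAS IO Unit Page 0 that was read into
 * a buffer large enough for NumPhys PhyData entries -- per the comment
 * above, Header.PageLength must be checked at run time instead of relying
 * on MPI_SAS_IOUNIT0_PHY_MAX.
 */
static int count_enabled_phys(const SasIOUnitPage0_t *page0)
{
    int enabled = 0;
    U8  i;

    for (i = 0; i < page0->NumPhys; i++) {
        const MPI_SAS_IO_UNIT0_PHY_DATA *phy = &page0->PhyData[i];

        if (!(phy->PhyFlags & MPI_SAS_IOUNIT0_PHY_FLAGS_PHY_DISABLED))
            enabled++;
    }
    return enabled;
}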
1870
1871typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
1872{
1873 U8 Port; /* 00h */
1874 U8 PortFlags; /* 01h */
1875 U8 PhyFlags; /* 02h */
1876 U8 MaxMinLinkRate; /* 03h */
1877 U32 ControllerPhyDeviceInfo;/* 04h */
1878 U32 Reserved1; /* 08h */
1879} MPI_SAS_IO_UNIT1_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT1_PHY_DATA,
1880 SasIOUnit1PhyData, MPI_POINTER pSasIOUnit1PhyData;
1881
1882/*
1883 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1884 * one and check Header.PageLength at runtime.
1885 */
1886#ifndef MPI_SAS_IOUNIT1_PHY_MAX
1887#define MPI_SAS_IOUNIT1_PHY_MAX (1)
1888#endif
1889
1890typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
1891{
1892 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
1893 U32 Reserved1; /* 08h */
1894 U8 NumPhys; /* 0Ch */
1895 U8 Reserved2; /* 0Dh */
1896 U16 Reserved3; /* 0Eh */
1897 MPI_SAS_IO_UNIT1_PHY_DATA PhyData[MPI_SAS_IOUNIT1_PHY_MAX]; /* 10h */
1898} fCONFIG_PAGE_SAS_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_1,
1899 SasIOUnitPage1_t, MPI_POINTER pSasIOUnitPage1_t;
1900
1901#define MPI_SASIOUNITPAGE1_PAGEVERSION (0x00)
1902
1903/* values for SAS IO Unit Page 1 PortFlags */
1904#define MPI_SAS_IOUNIT1_PORT_FLAGS_0_TARGET_IOC_NUM (0x00)
1905#define MPI_SAS_IOUNIT1_PORT_FLAGS_1_TARGET_IOC_NUM (0x04)
1906#define MPI_SAS_IOUNIT1_PORT_FLAGS_WAIT_FOR_PORTENABLE (0x02)
1907#define MPI_SAS_IOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
1908
1909/* values for SAS IO Unit Page 1 PhyFlags */
1910#define MPI_SAS_IOUNIT1_PHY_FLAGS_PHY_DISABLE (0x04)
1911#define MPI_SAS_IOUNIT1_PHY_FLAGS_TX_INVERT (0x02)
1912#define MPI_SAS_IOUNIT1_PHY_FLAGS_RX_INVERT (0x01)
1913
1914/* values for SAS IO Unit Page 1 MaxMinLinkRate */
1915#define MPI_SAS_IOUNIT1_MAX_RATE_MASK (0xF0)
1916#define MPI_SAS_IOUNIT1_MAX_RATE_1_5 (0x80)
1917#define MPI_SAS_IOUNIT1_MAX_RATE_3_0 (0x90)
1918#define MPI_SAS_IOUNIT1_MIN_RATE_MASK (0x0F)
1919#define MPI_SAS_IOUNIT1_MIN_RATE_1_5 (0x08)
1920#define MPI_SAS_IOUNIT1_MIN_RATE_3_0 (0x09)
1921
1922/* see mpi_sas.h for values for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
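MaxMinLinkRate in the Page 1 phy data packs the maximum programmed link rate into its high nibble and the minimum into its low nibble. A small sketch of splitting it with the masks above, added for illustration only (the helper name is hypothetical):

/* Illustrative sketch only: split the packed MaxMinLinkRate byte. */
static void sas_iounit1_get_rates(const MPI_SAS_IO_UNIT1_PHY_DATA *phy,
                                  U8 *min_rate, U8 *max_rate)
{
    *min_rate = phy->MaxMinLinkRate & MPI_SAS_IOUNIT1_MIN_RATE_MASK;
    *max_rate = phy->MaxMinLinkRate & MPI_SAS_IOUNIT1_MAX_RATE_MASK;
    /* e.g. *max_rate == MPI_SAS_IOUNIT1_MAX_RATE_3_0 caps the phy at 3.0 Gbps */
}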
1923
1924
1925typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
1926{
1927 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
1928 U32 Reserved1; /* 08h */
1929 U16 MaxPersistentIDs; /* 0Ch */
1930 U16 NumPersistentIDsUsed; /* 0Eh */
1931 U8 Status; /* 10h */
1932 U8 Flags; /* 11h */
1933 U16 Reserved2; /* 12h */
1934} fCONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2,
1935 SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t;
1936
1937#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x00)
1938
1939/* values for SAS IO Unit Page 2 Status field */
1940#define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02)
1941#define MPI_SAS_IOUNIT2_STATUS_FULL_PERSISTENT_MAPPINGS (0x01)
1942
1943/* values for SAS IO Unit Page 2 Flags field */
1944#define MPI_SAS_IOUNIT2_FLAGS_DISABLE_PERSISTENT_MAPPINGS (0x01)
1945
1946
1947typedef struct _CONFIG_PAGE_SAS_IO_UNIT_3
1948{
1949 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
1950 U32 Reserved1; /* 08h */
1951 U32 MaxInvalidDwordCount; /* 0Ch */
1952 U32 InvalidDwordCountTime; /* 10h */
1953 U32 MaxRunningDisparityErrorCount; /* 14h */
1954 U32 RunningDisparityErrorTime; /* 18h */
1955 U32 MaxLossDwordSynchCount; /* 1Ch */
1956 U32 LossDwordSynchCountTime; /* 20h */
1957 U32 MaxPhyResetProblemCount; /* 24h */
1958 U32 PhyResetProblemTime; /* 28h */
1959} fCONFIG_PAGE_SAS_IO_UNIT_3, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_3,
1960 SasIOUnitPage3_t, MPI_POINTER pSasIOUnitPage3_t;
1961
1962#define MPI_SASIOUNITPAGE3_PAGEVERSION (0x00)
1963
1964
1965typedef struct _CONFIG_PAGE_SAS_EXPANDER_0
1966{
1967 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
1968 U32 Reserved1; /* 08h */
1969 U64 SASAddress; /* 0Ch */
1970 U32 Reserved2; /* 14h */
1971 U16 DevHandle; /* 18h */
1972 U16 ParentDevHandle; /* 1Ah */
1973 U16 ExpanderChangeCount; /* 1Ch */
1974 U16 ExpanderRouteIndexes; /* 1Eh */
1975 U8 NumPhys; /* 20h */
1976 U8 SASLevel; /* 21h */
1977 U8 Flags; /* 22h */
1978 U8 Reserved3; /* 23h */
1979} fCONFIG_PAGE_SAS_EXPANDER_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_EXPANDER_0,
1980 SasExpanderPage0_t, MPI_POINTER pSasExpanderPage0_t;
1981
1982#define MPI_SASEXPANDER0_PAGEVERSION (0x00)
1983
1984/* values for SAS Expander Page 0 Flags field */
1985#define MPI_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x02)
1986#define MPI_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x01)
1987
1988
1989typedef struct _CONFIG_PAGE_SAS_DEVICE_0
1990{
1991 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
1992 U32 Reserved1; /* 08h */
1993 U64 SASAddress; /* 0Ch */
1994 U32 Reserved2; /* 14h */
1995 U16 DevHandle; /* 18h */
1996 U8 TargetID; /* 1Ah */
1997 U8 Bus; /* 1Bh */
1998 U32 DeviceInfo; /* 1Ch */
1999 U16 Flags; /* 20h */
2000 U8 PhysicalPort; /* 22h */
2001 U8 Reserved3; /* 23h */
2002} fCONFIG_PAGE_SAS_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_0,
2003 SasDevicePage0_t, MPI_POINTER pSasDevicePage0_t;
2004
2005#define MPI_SASDEVICE0_PAGEVERSION (0x00)
2006
2007/* values for SAS Device Page 0 Flags field */
2008#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT (0x04)
2009#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED (0x02)
2010#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x01)
2011
2012/* see mpi_sas.h for values for SAS Device Page 0 DeviceInfo values */
2013
2014
2015typedef struct _CONFIG_PAGE_SAS_DEVICE_1
2016{
2017 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
2018 U32 Reserved1; /* 08h */
2019 U64 SASAddress; /* 0Ch */
2020 U32 Reserved2; /* 14h */
2021 U16 DevHandle; /* 18h */
2022 U8 TargetID; /* 1Ah */
2023 U8 Bus; /* 1Bh */
2024 U8 InitialRegDeviceFIS[20];/* 1Ch */
2025} fCONFIG_PAGE_SAS_DEVICE_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_1,
2026 SasDevicePage1_t, MPI_POINTER pSasDevicePage1_t;
2027
2028#define MPI_SASDEVICE1_PAGEVERSION (0x00)
2029
2030
2031typedef struct _CONFIG_PAGE_SAS_PHY_0
2032{
2033 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
2034 U32 Reserved1; /* 08h */
2035 U64 SASAddress; /* 0Ch */
2036 U16 AttachedDevHandle; /* 14h */
2037 U8 AttachedPhyIdentifier; /* 16h */
2038 U8 Reserved2; /* 17h */
2039 U32 AttachedDeviceInfo; /* 18h */
2040 U8 ProgrammedLinkRate; /* 20h */
2041 U8 HwLinkRate; /* 21h */
2042 U8 ChangeCount; /* 22h */
2043 U8 Reserved3; /* 23h */
2044 U32 PhyInfo; /* 24h */
2045} fCONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0,
2046 SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t;
2047
2048#define MPI_SASPHY0_PAGEVERSION (0x00)
2049
2050/* values for SAS PHY Page 0 ProgrammedLinkRate field */
2051#define MPI_SAS_PHY0_PRATE_MAX_RATE_MASK (0xF0)
2052#define MPI_SAS_PHY0_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
2053#define MPI_SAS_PHY0_PRATE_MAX_RATE_1_5 (0x80)
2054#define MPI_SAS_PHY0_PRATE_MAX_RATE_3_0 (0x90)
2055#define MPI_SAS_PHY0_PRATE_MIN_RATE_MASK (0x0F)
2056#define MPI_SAS_PHY0_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
2057#define MPI_SAS_PHY0_PRATE_MIN_RATE_1_5 (0x08)
2058#define MPI_SAS_PHY0_PRATE_MIN_RATE_3_0 (0x09)
2059
2060/* values for SAS PHY Page 0 HwLinkRate field */
2061#define MPI_SAS_PHY0_HWRATE_MAX_RATE_MASK (0xF0)
2062#define MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5 (0x80)
2063#define MPI_SAS_PHY0_HWRATE_MAX_RATE_3_0 (0x90)
2064#define MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK (0x0F)
2065#define MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5 (0x08)
2066#define MPI_SAS_PHY0_HWRATE_MIN_RATE_3_0 (0x09)
2067
2068/* values for SAS PHY Page 0 PhyInfo field */
2069#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
2070#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR (0x00002000)
2071#define MPI_SAS_PHY0_PHYINFO_VIRTUAL_PHY (0x00001000)
2072
2073#define MPI_SAS_PHY0_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00)
2074#define MPI_SAS_PHY0_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8)
2075
2076#define MPI_SAS_PHY0_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0)
2077#define MPI_SAS_PHY0_PHYINFO_DIRECT_ROUTING (0x00000000)
2078#define MPI_SAS_PHY0_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010)
2079#define MPI_SAS_PHY0_PHYINFO_TABLE_ROUTING (0x00000020)
2080
2081#define MPI_SAS_PHY0_PHYINFO_MASK_LINK_RATE (0x0000000F)
2082#define MPI_SAS_PHY0_PHYINFO_UNKNOWN_LINK_RATE (0x00000000)
2083#define MPI_SAS_PHY0_PHYINFO_PHY_DISABLED (0x00000001)
2084#define MPI_SAS_PHY0_PHYINFO_NEGOTIATION_FAILED (0x00000002)
2085#define MPI_SAS_PHY0_PHYINFO_SATA_OOB_COMPLETE (0x00000003)
2086#define MPI_SAS_PHY0_PHYINFO_RATE_1_5 (0x00000008)
2087#define MPI_SAS_PHY0_PHYINFO_RATE_3_0 (0x00000009)
2088
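PhyInfo is a packed dword: the routing attribute, the partial pathway timeout value and the current link rate each sit in their own masked field. A decoding sketch, added for illustration only (the helper name is hypothetical):

/* Illustrative sketch only: decode a SAS PHY Page 0 PhyInfo dword. */
static void sas_phy0_decode_phyinfo(U32 phy_info, U32 *routing,
                                    U32 *partial_pathway_time, U32 *link_rate)
{
    *routing   = phy_info & MPI_SAS_PHY0_PHYINFO_MASK_ROUTING_ATTRIBUTE;
    *partial_pathway_time =
        (phy_info & MPI_SAS_PHY0_PHYINFO_MASK_PARTIAL_PATHWAY_TIME) >>
         MPI_SAS_PHY0_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME;
    *link_rate = phy_info & MPI_SAS_PHY0_PHYINFO_MASK_LINK_RATE;
    /* e.g. *routing == MPI_SAS_PHY0_PHYINFO_TABLE_ROUTING and
     * *link_rate == MPI_SAS_PHY0_PHYINFO_RATE_3_0 for a table-routed
     * expander phy running at 3.0 Gbps. */
}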
2089
2090typedef struct _CONFIG_PAGE_SAS_PHY_1
2091{
2092 fCONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
2093 U32 Reserved1; /* 08h */
2094 U32 InvalidDwordCount; /* 0Ch */
2095 U32 RunningDisparityErrorCount; /* 10h */
2096 U32 LossDwordSynchCount; /* 14h */
2097 U32 PhyResetProblemCount; /* 18h */
2098} fCONFIG_PAGE_SAS_PHY_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_1,
2099 SasPhyPage1_t, MPI_POINTER pSasPhyPage1_t;
2100
2101#define MPI_SASPHY1_PAGEVERSION (0x00)
2102
2103
2104#endif
2105
diff --git a/drivers/message/fusion/lsi/mpi_fc.h b/drivers/message/fusion/lsi/mpi_fc.h
new file mode 100644
index 000000000000..ea266b236c1f
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_fc.h
@@ -0,0 +1,363 @@
1/*
2 * Copyright (c) 2000-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_fc.h
6 * Title: MPI Fibre Channel messages and structures
7 * Creation Date: June 12, 2000
8 *
9 * mpi_fc.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
17 * 06-06-00 01.00.01 Update version number for 1.0 release.
18 * 06-12-00 01.00.02 Added _MSG_FC_ABORT_REPLY structure.
19 * 11-02-00 01.01.01 Original release for post 1.0 work
20 * 12-04-00 01.01.02 Added messages for Common Transport Send and
21 * Primitive Send.
 22 * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
23 * and modified the FcPrimitiveSend flags.
24 * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
25 * field.
26 * Added FC_ABORT_TYPE_CT_SEND_REQUEST and
27 * FC_ABORT_TYPE_EXLINKSEND_REQUEST for FcAbort request.
28 * Added MPI_FC_PRIM_SEND_FLAGS_STOP_SEND.
29 * 02-20-01 01.01.05 Started using MPI_POINTER.
30 * 03-27-01 01.01.06 Added Flags field to MSG_LINK_SERVICE_BUFFER_POST_REPLY
31 * and defined MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED.
32 * Added MPI_FC_PRIM_SEND_FLAGS_RESET_LINK define.
33 * Added structure offset comments.
34 * 04-09-01 01.01.07 Added RspLength field to MSG_LINK_SERVICE_RSP_REQUEST.
35 * 08-08-01 01.02.01 Original release for v1.2 work.
36 * 09-28-01 01.02.02 Change name of reserved field in
37 * MSG_LINK_SERVICE_RSP_REPLY.
38 * 05-31-02 01.02.03 Adding AliasIndex to FC Direct Access requests.
39 * --------------------------------------------------------------------------
40 */
41
42#ifndef MPI_FC_H
43#define MPI_FC_H
44
45
46/*****************************************************************************
47*
48* F C D i r e c t A c c e s s M e s s a g e s
49*
50*****************************************************************************/
51
52/****************************************************************************/
53/* Link Service Buffer Post messages */
54/****************************************************************************/
55
56typedef struct _MSG_LINK_SERVICE_BUFFER_POST_REQUEST
57{
58 U8 BufferPostFlags; /* 00h */
59 U8 BufferCount; /* 01h */
60 U8 ChainOffset; /* 02h */
61 U8 Function; /* 03h */
62 U16 Reserved; /* 04h */
63 U8 Reserved1; /* 06h */
64 U8 MsgFlags; /* 07h */
65 U32 MsgContext; /* 08h */
66 SGE_TRANS_SIMPLE_UNION SGL;
67} MSG_LINK_SERVICE_BUFFER_POST_REQUEST,
68 MPI_POINTER PTR_MSG_LINK_SERVICE_BUFFER_POST_REQUEST,
69 LinkServiceBufferPostRequest_t, MPI_POINTER pLinkServiceBufferPostRequest_t;
70
71#define LINK_SERVICE_BUFFER_POST_FLAGS_PORT_MASK (0x01)
72
73typedef struct _WWNFORMAT
74{
75 U32 PortNameHigh; /* 00h */
76 U32 PortNameLow; /* 04h */
77 U32 NodeNameHigh; /* 08h */
78 U32 NodeNameLow; /* 0Ch */
79} WWNFORMAT,
80 WwnFormat_t;
81
82/* Link Service Buffer Post Reply */
83typedef struct _MSG_LINK_SERVICE_BUFFER_POST_REPLY
84{
85 U8 Flags; /* 00h */
86 U8 Reserved; /* 01h */
87 U8 MsgLength; /* 02h */
88 U8 Function; /* 03h */
89 U16 Reserved1; /* 04h */
90 U8 PortNumber; /* 06h */
91 U8 MsgFlags; /* 07h */
92 U32 MsgContext; /* 08h */
93 U16 Reserved2; /* 0Ch */
94 U16 IOCStatus; /* 0Eh */
95 U32 IOCLogInfo; /* 10h */
96 U32 TransferLength; /* 14h */
97 U32 TransactionContext; /* 18h */
98 U32 Rctl_Did; /* 1Ch */
99 U32 Csctl_Sid; /* 20h */
100 U32 Type_Fctl; /* 24h */
101 U16 SeqCnt; /* 28h */
102 U8 Dfctl; /* 2Ah */
103 U8 SeqId; /* 2Bh */
104 U16 Rxid; /* 2Ch */
105 U16 Oxid; /* 2Eh */
106 U32 Parameter; /* 30h */
107 WWNFORMAT Wwn; /* 34h */
108} MSG_LINK_SERVICE_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY,
109 LinkServiceBufferPostReply_t, MPI_POINTER pLinkServiceBufferPostReply_t;
110
111#define MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED (0x80)
112
113#define MPI_FC_DID_MASK (0x00FFFFFF)
114#define MPI_FC_DID_SHIFT (0)
115#define MPI_FC_RCTL_MASK (0xFF000000)
116#define MPI_FC_RCTL_SHIFT (24)
117#define MPI_FC_SID_MASK (0x00FFFFFF)
118#define MPI_FC_SID_SHIFT (0)
119#define MPI_FC_CSCTL_MASK (0xFF000000)
120#define MPI_FC_CSCTL_SHIFT (24)
121#define MPI_FC_FCTL_MASK (0x00FFFFFF)
122#define MPI_FC_FCTL_SHIFT (0)
123#define MPI_FC_TYPE_MASK (0xFF000000)
124#define MPI_FC_TYPE_SHIFT (24)
125
126/* obsolete name for the above */
127#define FCP_TARGET_DID_MASK (0x00FFFFFF)
128#define FCP_TARGET_DID_SHIFT (0)
129#define FCP_TARGET_RCTL_MASK (0xFF000000)
130#define FCP_TARGET_RCTL_SHIFT (24)
131#define FCP_TARGET_SID_MASK (0x00FFFFFF)
132#define FCP_TARGET_SID_SHIFT (0)
133#define FCP_TARGET_CSCTL_MASK (0xFF000000)
134#define FCP_TARGET_CSCTL_SHIFT (24)
135#define FCP_TARGET_FCTL_MASK (0x00FFFFFF)
136#define FCP_TARGET_FCTL_SHIFT (0)
137#define FCP_TARGET_TYPE_MASK (0xFF000000)
138#define FCP_TARGET_TYPE_SHIFT (24)
139
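Rctl_Did, Csctl_Sid and Type_Fctl in the buffer post reply each carry two Fibre Channel frame header fields in one dword, separated by the masks and shifts above. An unpacking sketch, added for illustration only (the helper name is hypothetical):

/* Illustrative sketch only: unpack the FC frame header fields that the
 * Link Service Buffer Post reply stores two-per-dword.
 */
static void fc_unpack_header(const MSG_LINK_SERVICE_BUFFER_POST_REPLY *reply,
                             U32 *d_id, U32 *r_ctl, U32 *s_id, U32 *type)
{
    *d_id  =  reply->Rctl_Did  & MPI_FC_DID_MASK;
    *r_ctl = (reply->Rctl_Did  & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
    *s_id  =  reply->Csctl_Sid & MPI_FC_SID_MASK;
    *type  = (reply->Type_Fctl & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
}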
140
141/****************************************************************************/
142/* Link Service Response messages */
143/****************************************************************************/
144
145typedef struct _MSG_LINK_SERVICE_RSP_REQUEST
146{
147 U8 RspFlags; /* 00h */
148 U8 RspLength; /* 01h */
149 U8 ChainOffset; /* 02h */
150 U8 Function; /* 03h */
151 U16 Reserved1; /* 04h */
152 U8 Reserved2; /* 06h */
153 U8 MsgFlags; /* 07h */
154 U32 MsgContext; /* 08h */
155 U32 Rctl_Did; /* 0Ch */
156 U32 Csctl_Sid; /* 10h */
157 U32 Type_Fctl; /* 14h */
158 U16 SeqCnt; /* 18h */
159 U8 Dfctl; /* 1Ah */
160 U8 SeqId; /* 1Bh */
161 U16 Rxid; /* 1Ch */
162 U16 Oxid; /* 1Eh */
163 U32 Parameter; /* 20h */
164 SGE_SIMPLE_UNION SGL; /* 24h */
165} MSG_LINK_SERVICE_RSP_REQUEST, MPI_POINTER PTR_MSG_LINK_SERVICE_RSP_REQUEST,
166 LinkServiceRspRequest_t, MPI_POINTER pLinkServiceRspRequest_t;
167
168#define LINK_SERVICE_RSP_FLAGS_IMMEDIATE (0x80)
169#define LINK_SERVICE_RSP_FLAGS_PORT_MASK (0x01)
170
171
172/* Link Service Response Reply */
173typedef struct _MSG_LINK_SERVICE_RSP_REPLY
174{
175 U16 Reserved; /* 00h */
176 U8 MsgLength; /* 02h */
177 U8 Function; /* 03h */
178 U16 Reserved1; /* 04h */
179 U8 Reserved_0100_InitiatorIndex; /* 06h */ /* obsolete InitiatorIndex */
180 U8 MsgFlags; /* 07h */
181 U32 MsgContext; /* 08h */
182 U16 Reserved3; /* 0Ch */
183 U16 IOCStatus; /* 0Eh */
184 U32 IOCLogInfo; /* 10h */
185 U32 InitiatorIndex; /* 14h */
186} MSG_LINK_SERVICE_RSP_REPLY, MPI_POINTER PTR_MSG_LINK_SERVICE_RSP_REPLY,
187 LinkServiceRspReply_t, MPI_POINTER pLinkServiceRspReply_t;
188
189
190/****************************************************************************/
191/* Extended Link Service Send messages */
192/****************************************************************************/
193
194typedef struct _MSG_EXLINK_SERVICE_SEND_REQUEST
195{
196 U8 SendFlags; /* 00h */
197 U8 AliasIndex; /* 01h */
198 U8 ChainOffset; /* 02h */
199 U8 Function; /* 03h */
200 U32 MsgFlags_Did; /* 04h */
201 U32 MsgContext; /* 08h */
202 U32 ElsCommandCode; /* 0Ch */
203 SGE_SIMPLE_UNION SGL; /* 10h */
204} MSG_EXLINK_SERVICE_SEND_REQUEST, MPI_POINTER PTR_MSG_EXLINK_SERVICE_SEND_REQUEST,
205 ExLinkServiceSendRequest_t, MPI_POINTER pExLinkServiceSendRequest_t;
206
207#define EX_LINK_SERVICE_SEND_DID_MASK (0x00FFFFFF)
208#define EX_LINK_SERVICE_SEND_DID_SHIFT (0)
209#define EX_LINK_SERVICE_SEND_MSGFLAGS_MASK (0xFF000000)
210#define EX_LINK_SERVICE_SEND_MSGFLAGS_SHIFT (24)
211
212
213/* Extended Link Service Send Reply */
214typedef struct _MSG_EXLINK_SERVICE_SEND_REPLY
215{
216 U8 Reserved; /* 00h */
217 U8 AliasIndex; /* 01h */
218 U8 MsgLength; /* 02h */
219 U8 Function; /* 03h */
220 U16 Reserved1; /* 04h */
221 U8 Reserved2; /* 06h */
222 U8 MsgFlags; /* 07h */
223 U32 MsgContext; /* 08h */
224 U16 Reserved3; /* 0Ch */
225 U16 IOCStatus; /* 0Eh */
226 U32 IOCLogInfo; /* 10h */
227 U32 ResponseLength; /* 14h */
228} MSG_EXLINK_SERVICE_SEND_REPLY, MPI_POINTER PTR_MSG_EXLINK_SERVICE_SEND_REPLY,
229 ExLinkServiceSendReply_t, MPI_POINTER pExLinkServiceSendReply_t;
230
231/****************************************************************************/
232/* FC Abort messages */
233/****************************************************************************/
234
235typedef struct _MSG_FC_ABORT_REQUEST
236{
237 U8 AbortFlags; /* 00h */
238 U8 AbortType; /* 01h */
239 U8 ChainOffset; /* 02h */
240 U8 Function; /* 03h */
241 U16 Reserved1; /* 04h */
242 U8 Reserved2; /* 06h */
243 U8 MsgFlags; /* 07h */
244 U32 MsgContext; /* 08h */
245 U32 TransactionContextToAbort; /* 0Ch */
246} MSG_FC_ABORT_REQUEST, MPI_POINTER PTR_MSG_FC_ABORT_REQUEST,
247 FcAbortRequest_t, MPI_POINTER pFcAbortRequest_t;
248
249#define FC_ABORT_FLAG_PORT_MASK (0x01)
250
251#define FC_ABORT_TYPE_ALL_FC_BUFFERS (0x00)
252#define FC_ABORT_TYPE_EXACT_FC_BUFFER (0x01)
253#define FC_ABORT_TYPE_CT_SEND_REQUEST (0x02)
254#define FC_ABORT_TYPE_EXLINKSEND_REQUEST (0x03)
255
256/* FC Abort Reply */
257typedef struct _MSG_FC_ABORT_REPLY
258{
259 U16 Reserved; /* 00h */
260 U8 MsgLength; /* 02h */
261 U8 Function; /* 03h */
262 U16 Reserved1; /* 04h */
263 U8 Reserved2; /* 06h */
264 U8 MsgFlags; /* 07h */
265 U32 MsgContext; /* 08h */
266 U16 Reserved3; /* 0Ch */
267 U16 IOCStatus; /* 0Eh */
268 U32 IOCLogInfo; /* 10h */
269} MSG_FC_ABORT_REPLY, MPI_POINTER PTR_MSG_FC_ABORT_REPLY,
270 FcAbortReply_t, MPI_POINTER pFcAbortReply_t;
271
272
273/****************************************************************************/
274/* FC Common Transport Send messages */
275/****************************************************************************/
276
277typedef struct _MSG_FC_COMMON_TRANSPORT_SEND_REQUEST
278{
279 U8 SendFlags; /* 00h */
280 U8 AliasIndex; /* 01h */
281 U8 ChainOffset; /* 02h */
282 U8 Function; /* 03h */
283 U32 MsgFlags_Did; /* 04h */
284 U32 MsgContext; /* 08h */
285 U16 CTCommandCode; /* 0Ch */
286 U8 FsType; /* 0Eh */
287 U8 Reserved1; /* 0Fh */
288 SGE_SIMPLE_UNION SGL; /* 10h */
289} MSG_FC_COMMON_TRANSPORT_SEND_REQUEST,
290 MPI_POINTER PTR_MSG_FC_COMMON_TRANSPORT_SEND_REQUEST,
291 FcCommonTransportSendRequest_t, MPI_POINTER pFcCommonTransportSendRequest_t;
292
293#define MPI_FC_CT_SEND_DID_MASK (0x00FFFFFF)
294#define MPI_FC_CT_SEND_DID_SHIFT (0)
295#define MPI_FC_CT_SEND_MSGFLAGS_MASK (0xFF000000)
296#define MPI_FC_CT_SEND_MSGFLAGS_SHIFT (24)
297
298
299/* FC Common Transport Send Reply */
300typedef struct _MSG_FC_COMMON_TRANSPORT_SEND_REPLY
301{
302 U8 Reserved; /* 00h */
303 U8 AliasIndex; /* 01h */
304 U8 MsgLength; /* 02h */
305 U8 Function; /* 03h */
306 U16 Reserved1; /* 04h */
307 U8 Reserved2; /* 06h */
308 U8 MsgFlags; /* 07h */
309 U32 MsgContext; /* 08h */
310 U16 Reserved3; /* 0Ch */
311 U16 IOCStatus; /* 0Eh */
312 U32 IOCLogInfo; /* 10h */
313 U32 ResponseLength; /* 14h */
314} MSG_FC_COMMON_TRANSPORT_SEND_REPLY, MPI_POINTER PTR_MSG_FC_COMMON_TRANSPORT_SEND_REPLY,
315 FcCommonTransportSendReply_t, MPI_POINTER pFcCommonTransportSendReply_t;
316
317
318/****************************************************************************/
319/* FC Primitive Send messages */
320/****************************************************************************/
321
322typedef struct _MSG_FC_PRIMITIVE_SEND_REQUEST
323{
324 U8 SendFlags; /* 00h */
325 U8 Reserved; /* 01h */
326 U8 ChainOffset; /* 02h */
327 U8 Function; /* 03h */
328 U16 Reserved1; /* 04h */
329 U8 Reserved2; /* 06h */
330 U8 MsgFlags; /* 07h */
331 U32 MsgContext; /* 08h */
332 U8 FcPrimitive[4]; /* 0Ch */
333} MSG_FC_PRIMITIVE_SEND_REQUEST, MPI_POINTER PTR_MSG_FC_PRIMITIVE_SEND_REQUEST,
334 FcPrimitiveSendRequest_t, MPI_POINTER pFcPrimitiveSendRequest_t;
335
336#define MPI_FC_PRIM_SEND_FLAGS_PORT_MASK (0x01)
337#define MPI_FC_PRIM_SEND_FLAGS_ML_RESET_LINK (0x02)
338#define MPI_FC_PRIM_SEND_FLAGS_RESET_LINK (0x04)
339#define MPI_FC_PRIM_SEND_FLAGS_STOP_SEND (0x08)
340#define MPI_FC_PRIM_SEND_FLAGS_SEND_ONCE (0x10)
341#define MPI_FC_PRIM_SEND_FLAGS_SEND_AROUND (0x20)
342#define MPI_FC_PRIM_SEND_FLAGS_UNTIL_FULL (0x40)
343#define MPI_FC_PRIM_SEND_FLAGS_FOREVER (0x80)
344
345/* FC Primitive Send Reply */
346typedef struct _MSG_FC_PRIMITIVE_SEND_REPLY
347{
348 U8 SendFlags; /* 00h */
349 U8 Reserved; /* 01h */
350 U8 MsgLength; /* 02h */
351 U8 Function; /* 03h */
352 U16 Reserved1; /* 04h */
353 U8 Reserved2; /* 06h */
354 U8 MsgFlags; /* 07h */
355 U32 MsgContext; /* 08h */
356 U16 Reserved3; /* 0Ch */
357 U16 IOCStatus; /* 0Eh */
358 U32 IOCLogInfo; /* 10h */
359} MSG_FC_PRIMITIVE_SEND_REPLY, MPI_POINTER PTR_MSG_FC_PRIMITIVE_SEND_REPLY,
360 FcPrimitiveSendReply_t, MPI_POINTER pFcPrimitiveSendReply_t;
361
362#endif
363
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
new file mode 100644
index 000000000000..0deb7721e936
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -0,0 +1,276 @@
1
2 ==============================
3 MPI Header File Change History
4 ==============================
5
6 Copyright (c) 2000-2001 LSI Logic Corporation.
7
8 ---------------------------------------
9 Header Set Release Version: 01.01.10
10 Header Set Release Date: 04-09-01
11 ---------------------------------------
12
13 Filename Current version Prior version
14 ---------- --------------- -------------
15 mpi.h 01.01.07 01.01.06
16 mpi_ioc.h 01.01.07 01.01.06
17 mpi_cnfg.h 01.01.11 01.01.10
18 mpi_init.h 01.01.05 01.01.04
19 mpi_targ.h 01.01.04 01.01.04
20 mpi_fc.h 01.01.07 01.01.06
21 mpi_lan.h 01.01.03 01.01.03
22 mpi_raid.h 01.01.02 01.01.02
23 mpi_type.h 01.01.02 01.01.02
24 mpi_history.txt 01.01.09 01.01.09
25
26
27 * Date Version Description
28 * -------- -------- ------------------------------------------------------
29
30mpi.h
31 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
32 * 05-24-00 00.10.02 Added MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH definition.
33 * 06-06-00 01.00.01 Update MPI_VERSION_MAJOR and MPI_VERSION_MINOR.
34 * 06-22-00 01.00.02 Added MPI_IOCSTATUS_LAN_ definitions.
35 * Removed LAN_SUSPEND function definition.
36 * Added MPI_MSGFLAGS_CONTINUATION_REPLY definition.
37 * 06-30-00 01.00.03 Added MPI_CONTEXT_REPLY_TYPE_LAN definition.
38 * Added MPI_GET/SET_CONTEXT_REPLY_TYPE macros.
39 * 07-27-00 01.00.04 Added MPI_FAULT_ definitions.
40 * Removed MPI_IOCSTATUS_MSG/DATA_XFER_ERROR definitions.
41 * Added MPI_IOCSTATUS_INTERNAL_ERROR definition.
42 * Added MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH.
43 * 11-02-00 01.01.01 Original release for post 1.0 work
44 * 12-04-00 01.01.02 Added new function codes.
45 * 01-09-01 01.01.03 Added more definitions to the system interface section
46 * Added MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT.
47 * 01-25-01 01.01.04 Changed MPI_VERSION_MINOR from 0x00 to 0x01.
48 * 02-20-01 01.01.05 Started using MPI_POINTER.
49 * Added defines for MPI_DIAG_PREVENT_IOC_BOOT and
50 * MPI_DIAG_CLEAR_FLASH_BAD_SIG.
51 * Obsoleted MPI_IOCSTATUS_TARGET_FC_ defines.
52 * 02-27-01 01.01.06 Removed MPI_HOST_INDEX_REGISTER define.
53 * Added function codes for RAID.
54 * 04-09-01 01.01.07 Added alternate define for MPI_DOORBELL_ACTIVE,
55 * MPI_DOORBELL_USED, to better match the spec.
56 * --------------------------------------------------------------------------
57
58mpi_ioc.h
59 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
60 * 05-24-00 00.10.02 Added _MSG_IOC_INIT_REPLY structure.
61 * 06-06-00 01.00.01 Added CurReplyFrameSize field to _MSG_IOC_FACTS_REPLY.
62 * 06-12-00 01.00.02 Added _MSG_PORT_ENABLE_REPLY structure.
63 * Added _MSG_EVENT_ACK_REPLY structure.
64 * Added _MSG_FW_DOWNLOAD_REPLY structure.
65 * Added _MSG_TOOLBOX_REPLY structure.
66 * 06-30-00 01.00.03 Added MaxLanBuckets to _PORT_FACT_REPLY structure.
67 * 07-27-00 01.00.04 Added _EVENT_DATA structure definitions for _SCSI,
68 * _LINK_STATUS, _LOOP_STATE and _LOGOUT.
69 * 08-11-00 01.00.05 Switched positions of MsgLength and Function fields in
70 * _MSG_EVENT_ACK_REPLY structure to match specification.
71 * 11-02-00 01.01.01 Original release for post 1.0 work
72 * Added a value for Manufacturer to WhoInit
73 * 12-04-00 01.01.02 Modified IOCFacts reply, added FWUpload messages, and
74 * removed toolbox message.
75 * 01-09-01 01.01.03 Added event enabled and disabled defines.
76 * Added structures for FwHeader and DataHeader.
77 * Added ImageType to FwUpload reply.
78 * 02-20-01 01.01.04 Started using MPI_POINTER.
79 * 02-27-01 01.01.05 Added event for RAID status change and its event data.
80 * Added IocNumber field to MSG_IOC_FACTS_REPLY.
81 * 03-27-01 01.01.06 Added defines for ProductId field of MPI_FW_HEADER.
82 * Added structure offset comments.
83 * 04-09-01 01.01.07 Added structure EVENT_DATA_EVENT_CHANGE.
84 * --------------------------------------------------------------------------
85
86mpi_cnfg.h
87 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
88 * 06-06-00 01.00.01 Update version number for 1.0 release.
89 * 06-08-00 01.00.02 Added _PAGEVERSION definitions for all pages.
90 * Added FcPhLowestVersion, FcPhHighestVersion, Reserved2
91 * fields to FC_DEVICE_0 page, updated the page version.
92 * Changed _FREE_RUNNING_CLOCK to _PACING_TRANSFERS in
93 * SCSI_PORT_0, SCSI_DEVICE_0 and SCSI_DEVICE_1 pages
94 * and updated the page versions.
95 * Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
96 * page and updated the page version.
97 * Added Information field and _INFO_PARAMS_NEGOTIATED
 98 *                    definition to SCSI_DEVICE_0 page.
99 * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
100 * page version.
101 * Added BucketsRemaining to LAN_1 page, redefined the
102 * state values, and updated the page version.
103 * Revised bus width definitions in SCSI_PORT_0,
104 * SCSI_DEVICE_0 and SCSI_DEVICE_1 pages.
105 * 06-30-00 01.00.04 Added MaxReplySize to LAN_1 page and updated the page
106 * version.
107 * Moved FC_DEVICE_0 PageAddress description to spec.
108 * 07-27-00 01.00.05 Corrected the SubsystemVendorID and SubsystemID field
109 * widths in IOC_0 page and updated the page version.
110 * 11-02-00 01.01.01 Original release for post 1.0 work
111 * Added Manufacturing pages, IO Unit Page 2, SCSI SPI
112 * Port Page 2, FC Port Page 4, FC Port Page 5
113 * 12-04-00 01.01.03 Config page changes to match MPI rev 1.00.01.
114 * 12-05-00 01.01.04 Modified config page actions.
115 * 01-09-01 01.01.05 Added defines for page address formats.
116 * Data size for Manufacturing pages 2 and 3 no longer
117 * defined here.
118 * Io Unit Page 2 size is fixed at 4 adapters and some
119 * flags were changed.
120 * SCSI Port Page 2 Device Settings modified.
121 * New fields added to FC Port Page 0 and some flags
122 * cleaned up.
123 * Removed impedance flash from FC Port Page 1.
124 * Added FC Port pages 6 and 7.
125 * 01-25-01 01.01.06 Added MaxInitiators field to FcPortPage0.
126 * 01-29-01 01.01.07 Changed some defines to make them 32 character unique.
127 * Added some LinkType defines for FcPortPage0.
128 * 02-20-01 01.01.08 Started using MPI_POINTER.
129 * 02-27-01 01.01.09 Replaced MPI_CONFIG_PAGETYPE_SCSI_LUN with
130 * MPI_CONFIG_PAGETYPE_RAID_VOLUME.
131 * Added definitions and structures for IOC Page 2 and
132 * RAID Volume Page 2.
133 * 03-27-01 01.01.10 Added CONFIG_PAGE_FC_PORT_8 and CONFIG_PAGE_FC_PORT_9.
134 * CONFIG_PAGE_FC_PORT_3 now supports persistent by DID.
135 * Added VendorId and ProductRevLevel fields to
136 * RAIDVOL2_IM_PHYS_ID struct.
137 * Modified values for MPI_FCPORTPAGE0_FLAGS_ATTACH_
138 * defines to make them compatible to MPI version 1.0.
139 * Added structure offset comments.
140 * 04-09-01 01.01.11 Added some new defines for the PageAddress field and
141 * removed some obsolete ones.
142 * Added IO Unit Page 3.
143 * Modified defines for Scsi Port Page 2.
144 * Modified RAID Volume Pages.
145 * --------------------------------------------------------------------------
146
147mpi_init.h
148 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
149 * 05-24-00 00.10.02 Added SenseBufferLength to _MSG_SCSI_IO_REPLY.
150 * 06-06-00 01.00.01 Update version number for 1.0 release.
151 * 06-08-00 01.00.02 Added MPI_SCSI_RSP_INFO_ definitions.
152 * 11-02-00 01.01.01 Original release for post 1.0 work
153 * 12-04-00 01.01.02 Added MPI_SCSIIO_CONTROL_NO_DISCONNECT.
154 * 02-20-01 01.01.03 Started using MPI_POINTER.
155 * 03-27-01 01.01.04 Added structure offset comments.
156 * 04-10-01 01.01.05 Added new MsgFlag for MSG_SCSI_TASK_MGMT.
157 * --------------------------------------------------------------------------
158
159mpi_targ.h
160 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
161 * 06-06-00 01.00.01 Update version number for 1.0 release.
162 * 06-22-00 01.00.02 Added _MSG_TARGET_CMD_BUFFER_POST_REPLY structure.
163 * Corrected DECSRIPTOR typo to DESCRIPTOR.
164 * 11-02-00 01.01.01 Original release for post 1.0 work
165 * Modified target mode to use IoIndex instead of
166 * HostIndex and IocIndex. Added Alias.
167 * 01-09-01 01.01.02 Added defines for TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER
168 * and TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER.
169 * 02-20-01 01.01.03 Started using MPI_POINTER.
170 * Added structures for MPI_TARGET_SCSI_SPI_CMD_BUFFER and
171 * MPI_TARGET_FCP_CMD_BUFFER.
172 * 03-27-01 01.01.04 Added structure offset comments.
173 * --------------------------------------------------------------------------
174
175mpi_fc.h
176 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
177 * 06-06-00 01.00.01 Update version number for 1.0 release.
178 * 06-12-00 01.00.02 Added _MSG_FC_ABORT_REPLY structure.
179 * 11-02-00 01.01.01 Original release for post 1.0 work
180 * 12-04-00 01.01.02 Added messages for Common Transport Send and
181 * Primitive Send.
182 * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
183 * and modified the FcPrimitiveSend flags.
184 * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
185 * field.
186 * Added FC_ABORT_TYPE_CT_SEND_REQUEST and
187 * FC_ABORT_TYPE_EXLINKSEND_REQUEST for FcAbort request.
188 * Added MPI_FC_PRIM_SEND_FLAGS_STOP_SEND.
189 * 02-20-01 01.01.05 Started using MPI_POINTER.
190 * 03-27-01 01.01.06 Added Flags field to MSG_LINK_SERVICE_BUFFER_POST_REPLY
191 * and defined MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED.
192 * Added MPI_FC_PRIM_SEND_FLAGS_RESET_LINK define.
193 * Added structure offset comments.
194 * 04-09-01 01.01.07 Added RspLength field to MSG_LINK_SERVICE_RSP_REQUEST.
195 * --------------------------------------------------------------------------
196
197mpi_lan.h
198 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
199 * 05-24-00 00.10.02 Added LANStatus field to _MSG_LAN_SEND_REPLY.
200 * Added LANStatus field to _MSG_LAN_RECEIVE_POST_REPLY.
201 * Moved ListCount field in _MSG_LAN_RECEIVE_POST_REPLY.
202 * 06-06-00 01.00.01 Update version number for 1.0 release.
203 * 06-12-00 01.00.02 Added MPI_ to BUCKETSTATUS_ definitions.
204 * 06-22-00 01.00.03 Major changes to match new LAN definition in 1.0 spec.
205 * 06-30-00 01.00.04 Added Context Reply definitions per revised proposal.
206 * Changed transaction context usage to bucket/buffer.
207 * 07-05-00 01.00.05 Removed LAN_RECEIVE_POST_BUCKET_CONTEXT_MASK definition
208 * to lan private header file
209 * 11-02-00 01.01.01 Original release for post 1.0 work
210 * 02-20-01 01.01.02 Started using MPI_POINTER.
211 * 03-27-01 01.01.03 Added structure offset comments.
212 * --------------------------------------------------------------------------
213
214mpi_raid.h
215 * 02-27-01 01.01.01 Original release for this file.
216 * 03-27-01 01.01.02 Added structure offset comments.
217 * --------------------------------------------------------------------------
218
219mpi_type.h
220 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
221 * 06-06-00 01.00.01 Update version number for 1.0 release.
222 * 11-02-00 01.01.01 Original release for post 1.0 work
223 * 02-20-01 01.01.02 Added define and ifdef for MPI_POINTER.
224 * --------------------------------------------------------------------------
225
226mpi_history.txt Parts list history
227
228Filename 01.01.10
229---------- --------
230mpi.h 01.01.07
231mpi_ioc.h 01.01.07
232mpi_cnfg.h 01.01.11
233mpi_init.h 01.01.05
234mpi_targ.h 01.01.04
235mpi_fc.h 01.01.07
236mpi_lan.h 01.01.03
237mpi_raid.h 01.01.02
238mpi_type.h 01.01.02
239
240Filename 01.01.09 01.01.08 01.01.07 01.01.06 01.01.05 01.01.04
241---------- -------- -------- -------- -------- -------- --------
242mpi.h 01.01.06 01.01.06 01.01.05 01.01.04 01.01.04 01.01.03
243mpi_ioc.h 01.01.06 01.01.05 01.01.04 01.01.03 01.01.03 01.01.03
244mpi_cnfg.h 01.01.10 01.01.09 01.01.08 01.01.07 01.01.06 01.01.05
245mpi_init.h 01.01.04 01.01.03 01.01.03 01.01.02 01.01.02 01.01.02
246mpi_targ.h 01.01.04 01.01.03 01.01.03 01.01.02 01.01.02 01.01.02
247mpi_fc.h 01.01.06 01.01.05 01.01.05 01.01.04 01.01.04 01.01.03
248mpi_lan.h 01.01.03 01.01.02 01.01.02 01.01.01 01.01.01 01.01.01
249mpi_raid.h 01.01.02 01.01.01
250mpi_type.h 01.01.02 01.01.02 01.01.02 01.01.01 01.01.01 01.01.01
251
252Filename 01.01.03 01.01.02 01.01.01 01.00.07 01.00.06 01.00.05
253---------- -------- -------- -------- -------- -------- --------
254mpi.h 01.01.02 01.01.02 01.01.01 01.00.04 01.00.04 01.00.03
255mpi_ioc.h 01.01.02 01.01.02 01.01.01 01.00.05 01.00.04 01.00.03
256mpi_cnfg.h 01.01.04 01.01.03 01.01.01 01.00.05 01.00.05 01.00.04
257mpi_init.h 01.01.02 01.01.02 01.01.01 01.00.02 01.00.02 01.00.02
258mpi_targ.h 01.01.01 01.01.01 01.01.01 01.00.02 01.00.02 01.00.02
259mpi_fc.h 01.01.02 01.01.02 01.01.01 01.00.02 01.00.02 01.00.02
260mpi_lan.h 01.01.01 01.01.01 01.01.01 01.00.05 01.00.05 01.00.05
261mpi_type.h 01.01.01 01.01.01 01.01.01 01.00.01 01.00.01 01.00.01
262
263Filename 01.00.04 01.00.03 01.00.02 01.00.01 00.10.02 00.10.01
264---------- -------- -------- -------- -------- -------- --------
265mpi.h 01.00.02 01.00.01 01.00.01 01.00.01 00.10.02 00.10.01
266mpi_ioc.h 01.00.02 01.00.02 01.00.01 01.00.01 00.10.02 00.10.01
267mpi_cnfg.h 01.00.03 01.00.02 01.00.02 01.00.01 00.10.01 00.10.01
268mpi_init.h 01.00.02 01.00.02 01.00.02 01.00.01 00.10.02 00.10.01
269mpi_targ.h 01.00.02 01.00.01 01.00.01 01.00.01 00.10.01 00.10.01
270mpi_fc.h 01.00.02 01.00.02 01.00.01 01.00.01 00.10.01 00.10.01
271mpi_lan.h 01.00.03 01.00.02 01.00.01 01.00.01 00.10.02 00.10.01
272mpi_type.h 01.00.01 01.00.01 01.00.01 01.00.01 00.10.01 00.10.01
273
274
275 * --------------------------------------------------------------------------
276
diff --git a/drivers/message/fusion/lsi/mpi_inb.h b/drivers/message/fusion/lsi/mpi_inb.h
new file mode 100644
index 000000000000..dae29fbed56f
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_inb.h
@@ -0,0 +1,220 @@
1/*
2 * Copyright (c) 2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_inb.h
6 * Title: MPI Inband structures and definitions
7 * Creation Date: September 30, 2003
8 *
9 * mpi_inb.h Version: 01.03.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * ??-??-?? 01.03.01 Original release.
17 * --------------------------------------------------------------------------
18 */
19
20#ifndef MPI_INB_H
21#define MPI_INB_H
22
23/******************************************************************************
24*
25* I n b a n d M e s s a g e s
26*
27*******************************************************************************/
28
29
30/****************************************************************************/
31/* Inband Buffer Post Request */
32/****************************************************************************/
33
34typedef struct _MSG_INBAND_BUFFER_POST_REQUEST
35{
36 U8 Reserved1; /* 00h */
37 U8 BufferCount; /* 01h */
38 U8 ChainOffset; /* 02h */
39 U8 Function; /* 03h */
40 U16 Reserved2; /* 04h */
41 U8 Reserved3; /* 06h */
42 U8 MsgFlags; /* 07h */
43 U32 MsgContext; /* 08h */
44 U32 Reserved4; /* 0Ch */
45 SGE_TRANS_SIMPLE_UNION SGL; /* 10h */
46} MSG_INBAND_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REQUEST,
47 MpiInbandBufferPostRequest_t , MPI_POINTER pMpiInbandBufferPostRequest_t;
48
49
50typedef struct _WWN_FC_FORMAT
51{
52 U64 NodeName; /* 00h */
53 U64 PortName; /* 08h */
54} WWN_FC_FORMAT, MPI_POINTER PTR_WWN_FC_FORMAT,
55 WwnFcFormat_t, MPI_POINTER pWwnFcFormat_t;
56
57typedef struct _WWN_SAS_FORMAT
58{
59 U64 WorldWideID; /* 00h */
60 U32 Reserved1; /* 08h */
61 U32 Reserved2; /* 0Ch */
62} WWN_SAS_FORMAT, MPI_POINTER PTR_WWN_SAS_FORMAT,
63 WwnSasFormat_t, MPI_POINTER pWwnSasFormat_t;
64
65typedef union _WWN_INBAND_FORMAT
66{
67 WWN_FC_FORMAT Fc;
68 WWN_SAS_FORMAT Sas;
69} WWN_INBAND_FORMAT, MPI_POINTER PTR_WWN_INBAND_FORMAT,
70 WwnInbandFormat, MPI_POINTER pWwnInbandFormat;
71
72
73/* Inband Buffer Post reply message */
74
75typedef struct _MSG_INBAND_BUFFER_POST_REPLY
76{
77 U16 Reserved1; /* 00h */
78 U8 MsgLength; /* 02h */
79 U8 Function; /* 03h */
80 U16 Reserved2; /* 04h */
81 U8 Reserved3; /* 06h */
82 U8 MsgFlags; /* 07h */
83 U32 MsgContext; /* 08h */
84 U16 Reserved4; /* 0Ch */
85 U16 IOCStatus; /* 0Eh */
86 U32 IOCLogInfo; /* 10h */
87 U32 TransferLength; /* 14h */
88 U32 TransactionContext; /* 18h */
89 WWN_INBAND_FORMAT Wwn; /* 1Ch */
90 U32 IOCIdentifier[4]; /* 2Ch */
91} MSG_INBAND_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REPLY,
92 MpiInbandBufferPostReply_t, MPI_POINTER pMpiInbandBufferPostReply_t;
93
94
95/****************************************************************************/
96/* Inband Send Request */
97/****************************************************************************/
98
99typedef struct _MSG_INBAND_SEND_REQUEST
100{
101 U16 Reserved1; /* 00h */
102 U8 ChainOffset; /* 02h */
103 U8 Function; /* 03h */
104 U16 Reserved2; /* 04h */
105 U8 Reserved3; /* 06h */
106 U8 MsgFlags; /* 07h */
107 U32 MsgContext; /* 08h */
108 U32 Reserved4; /* 0Ch */
109 WWN_INBAND_FORMAT Wwn; /* 10h */
110 U32 Reserved5; /* 20h */
111 SGE_IO_UNION SGL; /* 24h */
112} MSG_INBAND_SEND_REQUEST, MPI_POINTER PTR_MSG_INBAND_SEND_REQUEST,
113 MpiInbandSendRequest_t , MPI_POINTER pMpiInbandSendRequest_t;
114
115
116/* Inband Send reply message */
117
118typedef struct _MSG_INBAND_SEND_REPLY
119{
120 U16 Reserved1; /* 00h */
121 U8 MsgLength; /* 02h */
122 U8 Function; /* 03h */
123 U16 Reserved2; /* 04h */
124 U8 Reserved3; /* 06h */
125 U8 MsgFlags; /* 07h */
126 U32 MsgContext; /* 08h */
127 U16 Reserved4; /* 0Ch */
128 U16 IOCStatus; /* 0Eh */
129 U32 IOCLogInfo; /* 10h */
130 U32 ResponseLength; /* 14h */
131} MSG_INBAND_SEND_REPLY, MPI_POINTER PTR_MSG_INBAND_SEND_REPLY,
132 MpiInbandSendReply_t, MPI_POINTER pMpiInbandSendReply_t;
133
134
135/****************************************************************************/
136/* Inband Response Request */
137/****************************************************************************/
138
139typedef struct _MSG_INBAND_RSP_REQUEST
140{
141 U16 Reserved1; /* 00h */
142 U8 ChainOffset; /* 02h */
143 U8 Function; /* 03h */
144 U16 Reserved2; /* 04h */
145 U8 Reserved3; /* 06h */
146 U8 MsgFlags; /* 07h */
147 U32 MsgContext; /* 08h */
148 U32 Reserved4; /* 0Ch */
149 WWN_INBAND_FORMAT Wwn; /* 10h */
150 U32 IOCIdentifier[4]; /* 20h */
151 U32 ResponseLength; /* 30h */
152 SGE_IO_UNION SGL; /* 34h */
153} MSG_INBAND_RSP_REQUEST, MPI_POINTER PTR_MSG_INBAND_RSP_REQUEST,
154 MpiInbandRspRequest_t , MPI_POINTER pMpiInbandRspRequest_t;
155
156
157/* Inband Response reply message */
158
159typedef struct _MSG_INBAND_RSP_REPLY
160{
161 U16 Reserved1; /* 00h */
162 U8 MsgLength; /* 02h */
163 U8 Function; /* 03h */
164 U16 Reserved2; /* 04h */
165 U8 Reserved3; /* 06h */
166 U8 MsgFlags; /* 07h */
167 U32 MsgContext; /* 08h */
168 U16 Reserved4; /* 0Ch */
169 U16 IOCStatus; /* 0Eh */
170 U32 IOCLogInfo; /* 10h */
171} MSG_INBAND_RSP_REPLY, MPI_POINTER PTR_MSG_INBAND_RSP_REPLY,
172 MpiInbandRspReply_t, MPI_POINTER pMpiInbandRspReply_t;
173
174
175/****************************************************************************/
176/* Inband Abort Request */
177/****************************************************************************/
178
179typedef struct _MSG_INBAND_ABORT_REQUEST
180{
181 U8 Reserved1; /* 00h */
182 U8 AbortType; /* 01h */
183 U8 ChainOffset; /* 02h */
184 U8 Function; /* 03h */
185 U16 Reserved2; /* 04h */
186 U8 Reserved3; /* 06h */
187 U8 MsgFlags; /* 07h */
188 U32 MsgContext; /* 08h */
189 U32 Reserved4; /* 0Ch */
190 U32 ContextToAbort; /* 10h */
191} MSG_INBAND_ABORT_REQUEST, MPI_POINTER PTR_MSG_INBAND_ABORT_REQUEST,
192 MpiInbandAbortRequest_t , MPI_POINTER pMpiInbandAbortRequest_t;
193
194#define MPI_INBAND_ABORT_TYPE_ALL_BUFFERS (0x00)
195#define MPI_INBAND_ABORT_TYPE_EXACT_BUFFER (0x01)
196#define MPI_INBAND_ABORT_TYPE_SEND_REQUEST (0x02)
197#define MPI_INBAND_ABORT_TYPE_RESPONSE_REQUEST (0x03)
198
199
200/* Inband Abort reply message */
201
202typedef struct _MSG_INBAND_ABORT_REPLY
203{
204 U8 Reserved1; /* 00h */
205 U8 AbortType; /* 01h */
206 U8 MsgLength; /* 02h */
207 U8 Function; /* 03h */
208 U16 Reserved2; /* 04h */
209 U8 Reserved3; /* 06h */
210 U8 MsgFlags; /* 07h */
211 U32 MsgContext; /* 08h */
212 U16 Reserved4; /* 0Ch */
213 U16 IOCStatus; /* 0Eh */
214 U32 IOCLogInfo; /* 10h */
215} MSG_INBAND_ABORT_REPLY, MPI_POINTER PTR_MSG_INBAND_ABORT_REPLY,
216 MpiInbandAbortReply_t, MPI_POINTER pMpiInbandAbortReply_t;
217
218
219#endif
220
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
new file mode 100644
index 000000000000..b3c95fd7256f
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -0,0 +1,362 @@
1/*
2 * Copyright (c) 2000-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_init.h
6 * Title: MPI initiator mode messages and structures
7 * Creation Date: June 8, 2000
8 *
9 * mpi_init.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
17 * 05-24-00 00.10.02 Added SenseBufferLength to _MSG_SCSI_IO_REPLY.
18 * 06-06-00 01.00.01 Update version number for 1.0 release.
19 * 06-08-00 01.00.02 Added MPI_SCSI_RSP_INFO_ definitions.
20 * 11-02-00 01.01.01 Original release for post 1.0 work.
21 * 12-04-00 01.01.02 Added MPI_SCSIIO_CONTROL_NO_DISCONNECT.
22 * 02-20-01 01.01.03 Started using MPI_POINTER.
23 * 03-27-01 01.01.04 Added structure offset comments.
24 * 04-10-01 01.01.05 Added new MsgFlag for MSG_SCSI_TASK_MGMT.
25 * 08-08-01 01.02.01 Original release for v1.2 work.
26 * 08-29-01 01.02.02 Added MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET.
27 * Added MPI_SCSI_STATE_QUEUE_TAG_REJECTED for
28 * MSG_SCSI_IO_REPLY.
29 * 09-28-01 01.02.03 Added structures and defines for SCSI Enclosure
30 * Processor messages.
31 * 10-04-01 01.02.04 Added defines for SEP request Action field.
32 * 05-31-02 01.02.05 Added MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR define
33 * for SCSI IO requests.
34 * 11-15-02 01.02.06 Added special extended SCSI Status defines for FCP.
35 * 06-26-03 01.02.07 Added MPI_SCSI_STATUS_FCPEXT_UNASSIGNED define.
36 * --------------------------------------------------------------------------
37 */
38
39#ifndef MPI_INIT_H
40#define MPI_INIT_H
41
42
43/*****************************************************************************
44*
45* S C S I I n i t i a t o r M e s s a g e s
46*
47*****************************************************************************/
48
49/****************************************************************************/
50/* SCSI IO messages and associated structures */
51/****************************************************************************/
52
53typedef struct _MSG_SCSI_IO_REQUEST
54{
55 U8 TargetID; /* 00h */
56 U8 Bus; /* 01h */
57 U8 ChainOffset; /* 02h */
58 U8 Function; /* 03h */
59 U8 CDBLength; /* 04h */
60 U8 SenseBufferLength; /* 05h */
61 U8 Reserved; /* 06h */
62 U8 MsgFlags; /* 07h */
63 U32 MsgContext; /* 08h */
64 U8 LUN[8]; /* 0Ch */
65 U32 Control; /* 14h */
66 U8 CDB[16]; /* 18h */
67 U32 DataLength; /* 28h */
68 U32 SenseBufferLowAddr; /* 2Ch */
69 SGE_IO_UNION SGL; /* 30h */
70} MSG_SCSI_IO_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO_REQUEST,
71 SCSIIORequest_t, MPI_POINTER pSCSIIORequest_t;
72
73
74/* SCSI IO MsgFlags bits */
75
76#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH (0x01)
77#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 (0x00)
78#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 (0x01)
79#define MPI_SCSIIO_MSGFLGS_SENSE_LOCATION (0x02)
80#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_HOST (0x00)
81#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_IOC (0x02)
82#define MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR (0x04)
83#define MPI_SCSIIO_MSGFLGS_EEDP_TYPE_MASK (0xE0)
84#define MPI_SCSIIO_MSGFLGS_EEDP_NONE (0x00)
85#define MPI_SCSIIO_MSGFLGS_EEDP_RDPROTECT_T10 (0x20)
86#define MPI_SCSIIO_MSGFLGS_EEDP_VRPROTECT_T10 (0x40)
87#define MPI_SCSIIO_MSGFLGS_EEDP_WRPROTECT_T10 (0x60)
88#define MPI_SCSIIO_MSGFLGS_EEDP_520_READ_MODE1 (0x20)
89#define MPI_SCSIIO_MSGFLGS_EEDP_520_WRITE_MODE1 (0x40)
90#define MPI_SCSIIO_MSGFLGS_EEDP_8_9_READ_MODE1 (0x60)
91#define MPI_SCSIIO_MSGFLGS_EEDP_8_9_WRITE_MODE1 (0x80)
92
93
94/* SCSI IO LUN fields */
95
96#define MPI_SCSIIO_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
97#define MPI_SCSIIO_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
98#define MPI_SCSIIO_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
99#define MPI_SCSIIO_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
100#define MPI_SCSIIO_LUN_LEVEL_1_WORD (0xFF00)
101#define MPI_SCSIIO_LUN_LEVEL_1_DWORD (0x0000FF00)
102
103/* SCSI IO Control bits */
104
105#define MPI_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
106#define MPI_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
107#define MPI_SCSIIO_CONTROL_WRITE (0x01000000)
108#define MPI_SCSIIO_CONTROL_READ (0x02000000)
109
110#define MPI_SCSIIO_CONTROL_ADDCDBLEN_MASK (0x3C000000)
111#define MPI_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
112
113#define MPI_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
114#define MPI_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
115#define MPI_SCSIIO_CONTROL_HEADOFQ (0x00000100)
116#define MPI_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
117#define MPI_SCSIIO_CONTROL_ACAQ (0x00000400)
118#define MPI_SCSIIO_CONTROL_UNTAGGED (0x00000500)
119#define MPI_SCSIIO_CONTROL_NO_DISCONNECT (0x00000700)
120
121#define MPI_SCSIIO_CONTROL_TASKMANAGE_MASK (0x00FF0000)
122#define MPI_SCSIIO_CONTROL_OBSOLETE (0x00800000)
123#define MPI_SCSIIO_CONTROL_CLEAR_ACA_RSV (0x00400000)
124#define MPI_SCSIIO_CONTROL_TARGET_RESET (0x00200000)
125#define MPI_SCSIIO_CONTROL_LUN_RESET_RSV (0x00100000)
126#define MPI_SCSIIO_CONTROL_RESERVED (0x00080000)
127#define MPI_SCSIIO_CONTROL_CLR_TASK_SET_RSV (0x00040000)
128#define MPI_SCSIIO_CONTROL_ABORT_TASK_SET (0x00020000)
129#define MPI_SCSIIO_CONTROL_RESERVED2 (0x00010000)
130
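The Control dword combines the data direction, the additional CDB length and the task attribute into separate bit fields. A sketch of composing it for an ordinary simple-queue read, added for illustration only (the helper name is hypothetical; AddCDBLen is left at zero on the assumption that the 16-byte CDB field of the request is sufficient):

/* Illustrative sketch only: build the Control dword for a simple-queue read. */
static U32 scsiio_build_control(U32 data_length)
{
    U32 control = MPI_SCSIIO_CONTROL_SIMPLEQ;          /* task attribute */

    if (data_length)
        control |= MPI_SCSIIO_CONTROL_READ;            /* data from the device */
    else
        control |= MPI_SCSIIO_CONTROL_NODATATRANSFER;  /* no data phase */

    return control;
}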
131
132/* SCSI IO reply structure */
133typedef struct _MSG_SCSI_IO_REPLY
134{
135 U8 TargetID; /* 00h */
136 U8 Bus; /* 01h */
137 U8 MsgLength; /* 02h */
138 U8 Function; /* 03h */
139 U8 CDBLength; /* 04h */
140 U8 SenseBufferLength; /* 05h */
141 U8 Reserved; /* 06h */
142 U8 MsgFlags; /* 07h */
143 U32 MsgContext; /* 08h */
144 U8 SCSIStatus; /* 0Ch */
145 U8 SCSIState; /* 0Dh */
146 U16 IOCStatus; /* 0Eh */
147 U32 IOCLogInfo; /* 10h */
148 U32 TransferCount; /* 14h */
149 U32 SenseCount; /* 18h */
150 U32 ResponseInfo; /* 1Ch */
151} MSG_SCSI_IO_REPLY, MPI_POINTER PTR_MSG_SCSI_IO_REPLY,
152 SCSIIOReply_t, MPI_POINTER pSCSIIOReply_t;
153
154
155/* SCSI IO Reply SCSIStatus values (SAM-2 status codes) */
156
157#define MPI_SCSI_STATUS_SUCCESS (0x00)
158#define MPI_SCSI_STATUS_CHECK_CONDITION (0x02)
159#define MPI_SCSI_STATUS_CONDITION_MET (0x04)
160#define MPI_SCSI_STATUS_BUSY (0x08)
161#define MPI_SCSI_STATUS_INTERMEDIATE (0x10)
162#define MPI_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
163#define MPI_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
164#define MPI_SCSI_STATUS_COMMAND_TERMINATED (0x22)
165#define MPI_SCSI_STATUS_TASK_SET_FULL (0x28)
166#define MPI_SCSI_STATUS_ACA_ACTIVE (0x30)
167
168#define MPI_SCSI_STATUS_FCPEXT_DEVICE_LOGGED_OUT (0x80)
169#define MPI_SCSI_STATUS_FCPEXT_NO_LINK (0x81)
170#define MPI_SCSI_STATUS_FCPEXT_UNASSIGNED (0x82)
171
172
173/* SCSI IO Reply SCSIState values */
174
175#define MPI_SCSI_STATE_AUTOSENSE_VALID (0x01)
176#define MPI_SCSI_STATE_AUTOSENSE_FAILED (0x02)
177#define MPI_SCSI_STATE_NO_SCSI_STATUS (0x04)
178#define MPI_SCSI_STATE_TERMINATED (0x08)
179#define MPI_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
180#define MPI_SCSI_STATE_QUEUE_TAG_REJECTED (0x20)
181
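SCSIState qualifies the rest of the reply: the sense fields are only meaningful when the autosense bit is set, and ResponseInfo only when its own valid bit is set. Two tiny checks, added for illustration only (the helper names are hypothetical):

/* Illustrative sketch only: trust sense data and response info only when
 * SCSIState says they are valid.
 */
static int scsiio_reply_sense_valid(const MSG_SCSI_IO_REPLY *reply)
{
    return (reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0;
}

static int scsiio_reply_rspinfo_valid(const MSG_SCSI_IO_REPLY *reply)
{
    return (reply->SCSIState & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0;
}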
182/* SCSI IO Reply ResponseInfo values */
183/* (FCP-1 RSP_CODE values and SPI-3 Packetized Failure codes) */
184
185#define MPI_SCSI_RSP_INFO_FUNCTION_COMPLETE (0x00000000)
186#define MPI_SCSI_RSP_INFO_FCP_BURST_LEN_ERROR (0x01000000)
187#define MPI_SCSI_RSP_INFO_CMND_FIELDS_INVALID (0x02000000)
188#define MPI_SCSI_RSP_INFO_FCP_DATA_RO_ERROR (0x03000000)
189#define MPI_SCSI_RSP_INFO_TASK_MGMT_UNSUPPORTED (0x04000000)
190#define MPI_SCSI_RSP_INFO_TASK_MGMT_FAILED (0x05000000)
191#define MPI_SCSI_RSP_INFO_SPI_LQ_INVALID_TYPE (0x06000000)
192
193
194/****************************************************************************/
195/* SCSI IO 32 Request message structure */
196/****************************************************************************/
197
198typedef struct _MSG_SCSI_IO32_REQUEST
199{
200 U8 TargetID; /* 00h */
201 U8 Bus; /* 01h */
202 U8 ChainOffset; /* 02h */
203 U8 Function; /* 03h */
204 U8 CDBLength; /* 04h */
205 U8 SenseBufferLength; /* 05h */
206 U8 Reserved; /* 06h */
207 U8 MsgFlags; /* 07h */
208 U32 MsgContext; /* 08h */
209 U8 LUN[8]; /* 0Ch */
210 U32 Control; /* 14h */
211 U8 CDB[32]; /* 18h */
212 U32 DataLength; /* 38h */
213 U32 SenseBufferLowAddr; /* 3Ch */
214 SGE_IO_UNION SGL; /* 40h */
215} MSG_SCSI_IO32_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO32_REQUEST,
216 SCSIIO32Request_t, MPI_POINTER pSCSIIO32Request_t;
217
218/* SCSI IO 32 uses the same defines as above for SCSI IO */
219
220
221/****************************************************************************/
222/* SCSI Task Management messages */
223/****************************************************************************/
224
225typedef struct _MSG_SCSI_TASK_MGMT
226{
227 U8 TargetID; /* 00h */
228 U8 Bus; /* 01h */
229 U8 ChainOffset; /* 02h */
230 U8 Function; /* 03h */
231 U8 Reserved; /* 04h */
232 U8 TaskType; /* 05h */
233 U8 Reserved1; /* 06h */
234 U8 MsgFlags; /* 07h */
235 U32 MsgContext; /* 08h */
236 U8 LUN[8]; /* 0Ch */
237 U32 Reserved2[7]; /* 14h */
238 U32 TaskMsgContext; /* 30h */
239} MSG_SCSI_TASK_MGMT, MPI_POINTER PTR_SCSI_TASK_MGMT,
240 SCSITaskMgmt_t, MPI_POINTER pSCSITaskMgmt_t;
241
242/* TaskType values */
243
244#define MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
245#define MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
246#define MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
247#define MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS (0x04)
248#define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
249#define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
250
251/* MsgFlags bits */
252#define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION (0x00)
253#define MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION (0x02)
254#define MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION (0x04)
255
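TaskMsgContext identifies the request being acted on; for an abort-task it is the MsgContext that the original SCSI IO request was posted with. A request-building sketch, added for illustration only (the helper name is hypothetical, and MPI_FUNCTION_SCSI_TASK_MGMT is assumed to be the matching function code from mpi.h in this commit):

/* Illustrative sketch only: fill in an abort-task request.  memset/memcpy
 * and the MPI_FUNCTION_SCSI_TASK_MGMT define from mpi.h are assumed to be
 * available to the caller.
 */
static void build_abort_task(MSG_SCSI_TASK_MGMT *tm, U8 target_id, U8 bus,
                             const U8 lun[8], U32 task_msg_context)
{
    memset(tm, 0, sizeof(*tm));
    tm->TargetID       = target_id;
    tm->Bus            = bus;
    tm->Function       = MPI_FUNCTION_SCSI_TASK_MGMT;
    tm->TaskType       = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
    memcpy(tm->LUN, lun, sizeof(tm->LUN));
    tm->TaskMsgContext = task_msg_context;
}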
256/* SCSI Task Management Reply */
257typedef struct _MSG_SCSI_TASK_MGMT_REPLY
258{
259 U8 TargetID; /* 00h */
260 U8 Bus; /* 01h */
261 U8 MsgLength; /* 02h */
262 U8 Function; /* 03h */
263 U8 Reserved; /* 04h */
264 U8 TaskType; /* 05h */
265 U8 Reserved1; /* 06h */
266 U8 MsgFlags; /* 07h */
267 U32 MsgContext; /* 08h */
268 U8 Reserved2[2]; /* 0Ch */
269 U16 IOCStatus; /* 0Eh */
270 U32 IOCLogInfo; /* 10h */
271 U32 TerminationCount; /* 14h */
272} MSG_SCSI_TASK_MGMT_REPLY, MPI_POINTER PTR_MSG_SCSI_TASK_MGMT_REPLY,
273 SCSITaskMgmtReply_t, MPI_POINTER pSCSITaskMgmtReply_t;
274
275
276/****************************************************************************/
277/* SCSI Enclosure Processor messages */
278/****************************************************************************/
279
280typedef struct _MSG_SEP_REQUEST
281{
282 U8 TargetID; /* 00h */
283 U8 Bus; /* 01h */
284 U8 ChainOffset; /* 02h */
285 U8 Function; /* 03h */
286 U8 Action; /* 04h */
287 U8 Reserved1; /* 05h */
288 U8 Reserved2; /* 06h */
289 U8 MsgFlags; /* 07h */
290 U32 MsgContext; /* 08h */
291 U32 SlotStatus; /* 0Ch */
292} MSG_SEP_REQUEST, MPI_POINTER PTR_MSG_SEP_REQUEST,
293 SEPRequest_t, MPI_POINTER pSEPRequest_t;
294
295/* Action defines */
296#define MPI_SEP_REQ_ACTION_WRITE_STATUS (0x00)
297#define MPI_SEP_REQ_ACTION_READ_STATUS (0x01)
298
299/* SlotStatus bits for MSG_SEP_REQUEST */
300#define MPI_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
301#define MPI_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
302#define MPI_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
303#define MPI_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
304#define MPI_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
305#define MPI_SEP_REQ_SLOTSTATUS_PARITY_CHECK (0x00000020)
306#define MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
307#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
308#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
309#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
310#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
311#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
312#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT (0x00080000)
313#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE (0x00400000)
314#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)
315#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000)
316#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF (0x10000000)
317#define MPI_SEP_REQ_SLOTSTATUS_SWAP_RESET (0x80000000)
318
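
As a hedged sketch of how the request and its SlotStatus bits fit together: the snippet below asks the enclosure processor to light the identify indicator for one slot. The helper is hypothetical, and MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR is assumed to be the SEP function code defined in mpi.h.

#include <string.h>

/* Hypothetical helper: turn on the identify indicator for (bus, target). */
static void example_sep_identify(SEPRequest_t *req, U8 bus, U8 target, U32 ctx)
{
	memset(req, 0, sizeof(*req));
	req->TargetID   = target;
	req->Bus        = bus;
	req->Function   = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; /* assumed, from mpi.h */
	req->Action     = MPI_SEP_REQ_ACTION_WRITE_STATUS;
	req->MsgContext = ctx;
	req->SlotStatus = MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
}
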
319
320typedef struct _MSG_SEP_REPLY
321{
322 U8 TargetID; /* 00h */
323 U8 Bus; /* 01h */
324 U8 MsgLength; /* 02h */
325 U8 Function; /* 03h */
326 U8 Action; /* 04h */
327 U8 Reserved1; /* 05h */
328 U8 Reserved2; /* 06h */
329 U8 MsgFlags; /* 07h */
330 U32 MsgContext; /* 08h */
331 U16 Reserved3; /* 0Ch */
332 U16 IOCStatus; /* 0Eh */
333 U32 IOCLogInfo; /* 10h */
334 U32 SlotStatus; /* 14h */
335} MSG_SEP_REPLY, MPI_POINTER PTR_MSG_SEP_REPLY,
336 SEPReply_t, MPI_POINTER pSEPReply_t;
337
338/* SlotStatus bits for MSG_SEP_REPLY */
339#define MPI_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
340#define MPI_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
341#define MPI_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
342#define MPI_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
343#define MPI_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
344#define MPI_SEP_REPLY_SLOTSTATUS_PARITY_CHECK (0x00000020)
345#define MPI_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
346#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
347#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
348#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
349#define MPI_SEP_REPLY_SLOTSTATUS_REPORT (0x00010000)
350#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
351#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
352#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY (0x00080000)
353#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
354#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED (0x01000000)
355#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED (0x02000000)
356#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)
357#define MPI_SEP_REPLY_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000)
358#define MPI_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x10000000)
359#define MPI_SEP_REPLY_SLOTSTATUS_FAULT_SENSED (0x40000000)
360#define MPI_SEP_REPLY_SLOTSTATUS_SWAPPED (0x80000000)
361
362#endif
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
new file mode 100644
index 000000000000..82445d18b4d5
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -0,0 +1,770 @@
1/*
2 * Copyright (c) 2000-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_ioc.h
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: August 11, 2000
8 *
9 * mpi_ioc.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
17 * 05-24-00 00.10.02 Added _MSG_IOC_INIT_REPLY structure.
18 * 06-06-00 01.00.01 Added CurReplyFrameSize field to _MSG_IOC_FACTS_REPLY.
19 * 06-12-00 01.00.02 Added _MSG_PORT_ENABLE_REPLY structure.
20 * Added _MSG_EVENT_ACK_REPLY structure.
21 * Added _MSG_FW_DOWNLOAD_REPLY structure.
22 * Added _MSG_TOOLBOX_REPLY structure.
23 * 06-30-00 01.00.03 Added MaxLanBuckets to _PORT_FACT_REPLY structure.
24 * 07-27-00 01.00.04 Added _EVENT_DATA structure definitions for _SCSI,
25 * _LINK_STATUS, _LOOP_STATE and _LOGOUT.
26 * 08-11-00 01.00.05 Switched positions of MsgLength and Function fields in
27 * _MSG_EVENT_ACK_REPLY structure to match specification.
28 * 11-02-00 01.01.01 Original release for post 1.0 work.
29 * Added a value for Manufacturer to WhoInit.
30 * 12-04-00 01.01.02 Modified IOCFacts reply, added FWUpload messages, and
31 * removed toolbox message.
32 * 01-09-01 01.01.03 Added event enabled and disabled defines.
33 * Added structures for FwHeader and DataHeader.
34 * Added ImageType to FwUpload reply.
35 * 02-20-01 01.01.04 Started using MPI_POINTER.
36 * 02-27-01 01.01.05 Added event for RAID status change and its event data.
37 * Added IocNumber field to MSG_IOC_FACTS_REPLY.
38 * 03-27-01 01.01.06 Added defines for ProductId field of MPI_FW_HEADER.
39 * Added structure offset comments.
40 * 04-09-01 01.01.07 Added structure EVENT_DATA_EVENT_CHANGE.
41 * 08-08-01 01.02.01 Original release for v1.2 work.
42 * New format for FWVersion and ProductId in
43 * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
44 * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and

45 * related structure and defines.
46 * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
47 * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
48 * Replaced a reserved field in MSG_IOC_FACTS_REPLY with
49 * IOCExceptions and changed DataImageSize to reserved.
50 * Added MPI_FW_DOWNLOAD_ITYPE_NVSTORE_DATA and
51 * MPI_FW_UPLOAD_ITYPE_NVDATA.
52 * 09-28-01 01.02.03 Modified Event Data for Integrated RAID.
53 * 11-01-01 01.02.04 Added defines for MPI_EXT_IMAGE_HEADER ImageType field.
54 * 03-14-02 01.02.05 Added HeaderVersion field to MSG_IOC_FACTS_REPLY.
55 * 05-31-02 01.02.06 Added define for
56 * MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID.
57 * Added AliasIndex to EVENT_DATA_LOGOUT structure.
58 * 04-01-03 01.02.07 Added defines for MPI_FW_HEADER_SIGNATURE_.
59 * 06-26-03 01.02.08 Added new values to the product family defines.
60 * --------------------------------------------------------------------------
61 */
62
63#ifndef MPI_IOC_H
64#define MPI_IOC_H
65
66
67/*****************************************************************************
68*
69* I O C M e s s a g e s
70*
71*****************************************************************************/
72
73/****************************************************************************/
74/* IOCInit message */
75/****************************************************************************/
76
77typedef struct _MSG_IOC_INIT
78{
79 U8 WhoInit; /* 00h */
80 U8 Reserved; /* 01h */
81 U8 ChainOffset; /* 02h */
82 U8 Function; /* 03h */
83 U8 Flags; /* 04h */
84 U8 MaxDevices; /* 05h */
85 U8 MaxBuses; /* 06h */
86 U8 MsgFlags; /* 07h */
87 U32 MsgContext; /* 08h */
88 U16 ReplyFrameSize; /* 0Ch */
89 U8 Reserved1[2]; /* 0Eh */
90 U32 HostMfaHighAddr; /* 10h */
91 U32 SenseBufferHighAddr; /* 14h */
92 U32 ReplyFifoHostSignalingAddr; /* 18h */
93} MSG_IOC_INIT, MPI_POINTER PTR_MSG_IOC_INIT,
94 IOCInit_t, MPI_POINTER pIOCInit_t;
95
96/* WhoInit values */
97#define MPI_WHOINIT_NO_ONE (0x00)
98#define MPI_WHOINIT_SYSTEM_BIOS (0x01)
99#define MPI_WHOINIT_ROM_BIOS (0x02)
100#define MPI_WHOINIT_PCI_PEER (0x03)
101#define MPI_WHOINIT_HOST_DRIVER (0x04)
102#define MPI_WHOINIT_MANUFACTURER (0x05)
103
104/* Flags values */
105#define MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE (0x01)
106#define MPI_IOCINIT_FLAGS_REPLY_FIFO_HOST_SIGNAL (0x02)
107
108typedef struct _MSG_IOC_INIT_REPLY
109{
110 U8 WhoInit; /* 00h */
111 U8 Reserved; /* 01h */
112 U8 MsgLength; /* 02h */
113 U8 Function; /* 03h */
114 U8 Flags; /* 04h */
115 U8 MaxDevices; /* 05h */
116 U8 MaxBuses; /* 06h */
117 U8 MsgFlags; /* 07h */
118 U32 MsgContext; /* 08h */
119 U16 Reserved2; /* 0Ch */
120 U16 IOCStatus; /* 0Eh */
121 U32 IOCLogInfo; /* 10h */
122} MSG_IOC_INIT_REPLY, MPI_POINTER PTR_MSG_IOC_INIT_REPLY,
123 IOCInitReply_t, MPI_POINTER pIOCInitReply_t;
124
125
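
A minimal, hypothetical sketch of filling in an IOCInit request from an earlier IOCFacts reply (defined further down in this file). MPI_FUNCTION_IOC_INIT is assumed to come from mpi.h; 32-bit host addressing is assumed, so the high-address fields stay zero.

#include <string.h>

/* Hypothetical helper: prepare an IOCInit request. */
static void example_build_ioc_init(IOCInit_t *init, const IOCFactsReply_t *facts,
				   U32 ctx, U16 reply_frame_size)
{
	memset(init, 0, sizeof(*init));
	init->WhoInit        = MPI_WHOINIT_HOST_DRIVER;
	init->Function       = MPI_FUNCTION_IOC_INIT;   /* assumed, from mpi.h */
	init->MaxDevices     = facts->MaxDevices;       /* echo the IOC's limits back */
	init->MaxBuses       = facts->MaxBuses;
	init->MsgContext     = ctx;
	init->ReplyFrameSize = reply_frame_size;
	/* HostMfaHighAddr / SenseBufferHighAddr carry the upper 32 bits of the
	 * host DMA ranges; they are left zero in this 32-bit sketch. */
}
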
126
127/****************************************************************************/
128/* IOC Facts message */
129/****************************************************************************/
130
131typedef struct _MSG_IOC_FACTS
132{
133 U8 Reserved[2]; /* 00h */
134 U8 ChainOffset; /* 02h */
135 U8 Function; /* 03h */
136 U8 Reserved1[3]; /* 04h */
137 U8 MsgFlags; /* 07h */
138 U32 MsgContext; /* 08h */
139} MSG_IOC_FACTS, MPI_POINTER PTR_IOC_FACTS,
140 IOCFacts_t, MPI_POINTER pIOCFacts_t;
141
142typedef struct _MPI_FW_VERSION_STRUCT
143{
144 U8 Dev; /* 00h */
145 U8 Unit; /* 01h */
146 U8 Minor; /* 02h */
147 U8 Major; /* 03h */
148} MPI_FW_VERSION_STRUCT;
149
150typedef union _MPI_FW_VERSION
151{
152 MPI_FW_VERSION_STRUCT Struct;
153 U32 Word;
154} MPI_FW_VERSION;
155
156/* IOC Facts Reply */
157typedef struct _MSG_IOC_FACTS_REPLY
158{
159 U16 MsgVersion; /* 00h */
160 U8 MsgLength; /* 02h */
161 U8 Function; /* 03h */
162 U16 HeaderVersion; /* 04h */
163 U8 IOCNumber; /* 06h */
164 U8 MsgFlags; /* 07h */
165 U32 MsgContext; /* 08h */
166 U16 IOCExceptions; /* 0Ch */
167 U16 IOCStatus; /* 0Eh */
168 U32 IOCLogInfo; /* 10h */
169 U8 MaxChainDepth; /* 14h */
170 U8 WhoInit; /* 15h */
171 U8 BlockSize; /* 16h */
172 U8 Flags; /* 17h */
173 U16 ReplyQueueDepth; /* 18h */
174 U16 RequestFrameSize; /* 1Ah */
175 U16 Reserved_0101_FWVersion; /* 1Ch */ /* obsolete 16-bit FWVersion */
176 U16 ProductID; /* 1Eh */
177 U32 CurrentHostMfaHighAddr; /* 20h */
178 U16 GlobalCredits; /* 24h */
179 U8 NumberOfPorts; /* 26h */
180 U8 EventState; /* 27h */
181 U32 CurrentSenseBufferHighAddr; /* 28h */
182 U16 CurReplyFrameSize; /* 2Ch */
183 U8 MaxDevices; /* 2Eh */
184 U8 MaxBuses; /* 2Fh */
185 U32 FWImageSize; /* 30h */
186 U32 IOCCapabilities; /* 34h */
187 MPI_FW_VERSION FWVersion; /* 38h */
188 U16 HighPriorityQueueDepth; /* 3Ch */
189 U16 Reserved2; /* 3Eh */
190} MSG_IOC_FACTS_REPLY, MPI_POINTER PTR_MSG_IOC_FACTS_REPLY,
191 IOCFactsReply_t, MPI_POINTER pIOCFactsReply_t;
192
193#define MPI_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00)
194#define MPI_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF)
195
196#define MPI_IOCFACTS_HEADERVERSION_UNIT_MASK (0xFF00)
197#define MPI_IOCFACTS_HEADERVERSION_DEV_MASK (0x00FF)
198
199#define MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001)
200#define MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002)
201#define MPI_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004)
202#define MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL (0x0008)
203
204#define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT (0x01)
205
206#define MPI_IOCFACTS_EVENTSTATE_DISABLED (0x00)
207#define MPI_IOCFACTS_EVENTSTATE_ENABLED (0x01)
208
209#define MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q (0x00000001)
210#define MPI_IOCFACTS_CAPABILITY_REPLY_HOST_SIGNAL (0x00000002)
211#define MPI_IOCFACTS_CAPABILITY_QUEUE_FULL_HANDLING (0x00000004)
212#define MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
213#define MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
214#define MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
215#define MPI_IOCFACTS_CAPABILITY_EEDP (0x00000040)
216
217
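
The masks and the MPI_FW_VERSION union above are easiest to see in use; the following hypothetical helper just prints a few fields of an IOCFacts reply (format specifiers assume the usual mpi_type.h integer widths).

#include <stdio.h>

/* Hypothetical helper: dump selected IOCFacts reply fields. */
static void example_dump_ioc_facts(const IOCFactsReply_t *facts)
{
	unsigned major = (facts->MsgVersion & MPI_IOCFACTS_MSGVERSION_MAJOR_MASK) >> 8;
	unsigned minor =  facts->MsgVersion & MPI_IOCFACTS_MSGVERSION_MINOR_MASK;

	printf("MPI version %u.%u, %u port(s), %u global credits\n",
	       major, minor, facts->NumberOfPorts, facts->GlobalCredits);
	printf("FW %u.%u.%u.%u\n",
	       facts->FWVersion.Struct.Major, facts->FWVersion.Struct.Minor,
	       facts->FWVersion.Struct.Unit,  facts->FWVersion.Struct.Dev);
	if (facts->IOCCapabilities & MPI_IOCFACTS_CAPABILITY_EEDP)
		printf("IOC supports end-to-end data protection\n");
}
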
218
219/*****************************************************************************
220*
221* P o r t M e s s a g e s
222*
223*****************************************************************************/
224
225/****************************************************************************/
226/* Port Facts message and Reply */
227/****************************************************************************/
228
229typedef struct _MSG_PORT_FACTS
230{
231 U8 Reserved[2]; /* 00h */
232 U8 ChainOffset; /* 02h */
233 U8 Function; /* 03h */
234 U8 Reserved1[2]; /* 04h */
235 U8 PortNumber; /* 06h */
236 U8 MsgFlags; /* 07h */
237 U32 MsgContext; /* 08h */
238} MSG_PORT_FACTS, MPI_POINTER PTR_MSG_PORT_FACTS,
239 PortFacts_t, MPI_POINTER pPortFacts_t;
240
241typedef struct _MSG_PORT_FACTS_REPLY
242{
243 U16 Reserved; /* 00h */
244 U8 MsgLength; /* 02h */
245 U8 Function; /* 03h */
246 U16 Reserved1; /* 04h */
247 U8 PortNumber; /* 06h */
248 U8 MsgFlags; /* 07h */
249 U32 MsgContext; /* 08h */
250 U16 Reserved2; /* 0Ch */
251 U16 IOCStatus; /* 0Eh */
252 U32 IOCLogInfo; /* 10h */
253 U8 Reserved3; /* 14h */
254 U8 PortType; /* 15h */
255 U16 MaxDevices; /* 16h */
256 U16 PortSCSIID; /* 18h */
257 U16 ProtocolFlags; /* 1Ah */
258 U16 MaxPostedCmdBuffers; /* 1Ch */
259 U16 MaxPersistentIDs; /* 1Eh */
260 U16 MaxLanBuckets; /* 20h */
261 U16 Reserved4; /* 22h */
262 U32 Reserved5; /* 24h */
263} MSG_PORT_FACTS_REPLY, MPI_POINTER PTR_MSG_PORT_FACTS_REPLY,
264 PortFactsReply_t, MPI_POINTER pPortFactsReply_t;
265
266
267/* PortTypes values */
268
269#define MPI_PORTFACTS_PORTTYPE_INACTIVE (0x00)
270#define MPI_PORTFACTS_PORTTYPE_SCSI (0x01)
271#define MPI_PORTFACTS_PORTTYPE_FC (0x10)
272#define MPI_PORTFACTS_PORTTYPE_ISCSI (0x20)
273#define MPI_PORTFACTS_PORTTYPE_SAS (0x30)
274
275/* ProtocolFlags values */
276
277#define MPI_PORTFACTS_PROTOCOL_LOGBUSADDR (0x01)
278#define MPI_PORTFACTS_PROTOCOL_LAN (0x02)
279#define MPI_PORTFACTS_PROTOCOL_TARGET (0x04)
280#define MPI_PORTFACTS_PROTOCOL_INITIATOR (0x08)
281
282
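
For illustration, a hypothetical helper that summarizes a PortFacts reply using the PortType and ProtocolFlags defines above.

#include <stdio.h>

/* Hypothetical helper: describe a port from its PortFacts reply. */
static const char *example_port_type_name(const PortFactsReply_t *pf)
{
	switch (pf->PortType) {
	case MPI_PORTFACTS_PORTTYPE_SCSI:  return "parallel SCSI";
	case MPI_PORTFACTS_PORTTYPE_FC:    return "Fibre Channel";
	case MPI_PORTFACTS_PORTTYPE_SAS:   return "SAS";
	case MPI_PORTFACTS_PORTTYPE_ISCSI: return "iSCSI";
	default:                           return "inactive/unknown";
	}
}

static void example_dump_port_facts(const PortFactsReply_t *pf)
{
	printf("port %u: %s, initiator=%c, LAN=%c, max devices %u\n",
	       pf->PortNumber, example_port_type_name(pf),
	       (pf->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) ? 'y' : 'n',
	       (pf->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) ? 'y' : 'n',
	       pf->MaxDevices);
}
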
283/****************************************************************************/
284/* Port Enable Message */
285/****************************************************************************/
286
287typedef struct _MSG_PORT_ENABLE
288{
289 U8 Reserved[2]; /* 00h */
290 U8 ChainOffset; /* 02h */
291 U8 Function; /* 03h */
292 U8 Reserved1[2]; /* 04h */
293 U8 PortNumber; /* 06h */
294 U8 MsgFlags; /* 07h */
295 U32 MsgContext; /* 08h */
296} MSG_PORT_ENABLE, MPI_POINTER PTR_MSG_PORT_ENABLE,
297 PortEnable_t, MPI_POINTER pPortEnable_t;
298
299typedef struct _MSG_PORT_ENABLE_REPLY
300{
301 U8 Reserved[2]; /* 00h */
302 U8 MsgLength; /* 02h */
303 U8 Function; /* 03h */
304 U8 Reserved1[2]; /* 04h */
305 U8 PortNumber; /* 06h */
306 U8 MsgFlags; /* 07h */
307 U32 MsgContext; /* 08h */
308 U16 Reserved2; /* 0Ch */
309 U16 IOCStatus; /* 0Eh */
310 U32 IOCLogInfo; /* 10h */
311} MSG_PORT_ENABLE_REPLY, MPI_POINTER PTR_MSG_PORT_ENABLE_REPLY,
312 PortEnableReply_t, MPI_POINTER pPortEnableReply_t;
313
314
315/*****************************************************************************
316*
317* E v e n t M e s s a g e s
318*
319*****************************************************************************/
320
321/****************************************************************************/
322/* Event Notification messages */
323/****************************************************************************/
324
325typedef struct _MSG_EVENT_NOTIFY
326{
327 U8 Switch; /* 00h */
328 U8 Reserved; /* 01h */
329 U8 ChainOffset; /* 02h */
330 U8 Function; /* 03h */
331 U8 Reserved1[3]; /* 04h */
332 U8 MsgFlags; /* 07h */
333 U32 MsgContext; /* 08h */
334} MSG_EVENT_NOTIFY, MPI_POINTER PTR_MSG_EVENT_NOTIFY,
335 EventNotification_t, MPI_POINTER pEventNotification_t;
336
337/* Event Notification Reply */
338
339typedef struct _MSG_EVENT_NOTIFY_REPLY
340{
341 U16 EventDataLength; /* 00h */
342 U8 MsgLength; /* 02h */
343 U8 Function; /* 03h */
344 U8 Reserved1[2]; /* 04h */
345 U8 AckRequired; /* 06h */
346 U8 MsgFlags; /* 07h */
347 U32 MsgContext; /* 08h */
348 U8 Reserved2[2]; /* 0Ch */
349 U16 IOCStatus; /* 0Eh */
350 U32 IOCLogInfo; /* 10h */
351 U32 Event; /* 14h */
352 U32 EventContext; /* 18h */
353 U32 Data[1]; /* 1Ch */
354} MSG_EVENT_NOTIFY_REPLY, MPI_POINTER PTR_MSG_EVENT_NOTIFY_REPLY,
355 EventNotificationReply_t, MPI_POINTER pEventNotificationReply_t;
356
357/* Event Acknowledge */
358
359typedef struct _MSG_EVENT_ACK
360{
361 U8 Reserved[2]; /* 00h */
362 U8 ChainOffset; /* 02h */
363 U8 Function; /* 03h */
364 U8 Reserved1[3]; /* 04h */
365 U8 MsgFlags; /* 07h */
366 U32 MsgContext; /* 08h */
367 U32 Event; /* 0Ch */
368 U32 EventContext; /* 10h */
369} MSG_EVENT_ACK, MPI_POINTER PTR_MSG_EVENT_ACK,
370 EventAck_t, MPI_POINTER pEventAck_t;
371
372typedef struct _MSG_EVENT_ACK_REPLY
373{
374 U8 Reserved[2]; /* 00h */
375 U8 MsgLength; /* 02h */
376 U8 Function; /* 03h */
377 U8 Reserved1[3]; /* 04h */
378 U8 MsgFlags; /* 07h */
379 U32 MsgContext; /* 08h */
380 U16 Reserved2; /* 0Ch */
381 U16 IOCStatus; /* 0Eh */
382 U32 IOCLogInfo; /* 10h */
383} MSG_EVENT_ACK_REPLY, MPI_POINTER PTR_MSG_EVENT_ACK_REPLY,
384 EventAckReply_t, MPI_POINTER pEventAckReply_t;
385
386/* Switch */
387
388#define MPI_EVENT_NOTIFICATION_SWITCH_OFF (0x00)
389#define MPI_EVENT_NOTIFICATION_SWITCH_ON (0x01)
390
391/* Event */
392
393#define MPI_EVENT_NONE (0x00000000)
394#define MPI_EVENT_LOG_DATA (0x00000001)
395#define MPI_EVENT_STATE_CHANGE (0x00000002)
396#define MPI_EVENT_UNIT_ATTENTION (0x00000003)
397#define MPI_EVENT_IOC_BUS_RESET (0x00000004)
398#define MPI_EVENT_EXT_BUS_RESET (0x00000005)
399#define MPI_EVENT_RESCAN (0x00000006)
400#define MPI_EVENT_LINK_STATUS_CHANGE (0x00000007)
401#define MPI_EVENT_LOOP_STATE_CHANGE (0x00000008)
402#define MPI_EVENT_LOGOUT (0x00000009)
403#define MPI_EVENT_EVENT_CHANGE (0x0000000A)
404#define MPI_EVENT_INTEGRATED_RAID (0x0000000B)
405#define MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE (0x0000000C)
406#define MPI_EVENT_ON_BUS_TIMER_EXPIRED (0x0000000D)
407#define MPI_EVENT_QUEUE_FULL (0x0000000E)
408#define MPI_EVENT_SAS_DEVICE_STATUS_CHANGE (0x0000000F)
409#define MPI_EVENT_SAS_SES (0x00000010)
410#define MPI_EVENT_PERSISTENT_TABLE_FULL (0x00000011)
411
412/* AckRequired field values */
413
414#define MPI_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
415#define MPI_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
416
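
As a sketch of the event/ack handshake implied above: when a notification reply arrives with AckRequired set, the host sends an EventAck that echoes Event and EventContext. The helper is hypothetical, and MPI_FUNCTION_EVENT_ACK is assumed to be the EventAck function code in mpi.h.

#include <string.h>

/* Hypothetical helper: build an EventAck if the IOC asked for one.
 * Returns 1 when 'ack' was filled in and should be sent. */
static int example_event_needs_ack(const EventNotificationReply_t *evn,
				   EventAck_t *ack, U32 ack_ctx)
{
	if (evn->AckRequired != MPI_EVENT_NOTIFICATION_ACK_REQUIRED)
		return 0;

	memset(ack, 0, sizeof(*ack));
	ack->Function     = MPI_FUNCTION_EVENT_ACK;   /* assumed, from mpi.h */
	ack->MsgContext   = ack_ctx;
	ack->Event        = evn->Event;               /* echo event and context back */
	ack->EventContext = evn->EventContext;
	return 1;
}
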
417/* EventChange Event data */
418
419typedef struct _EVENT_DATA_EVENT_CHANGE
420{
421 U8 EventState; /* 00h */
422 U8 Reserved; /* 01h */
423 U16 Reserved1; /* 02h */
424} EVENT_DATA_EVENT_CHANGE, MPI_POINTER PTR_EVENT_DATA_EVENT_CHANGE,
425 EventDataEventChange_t, MPI_POINTER pEventDataEventChange_t;
426
427/* SCSI Event data for Port, Bus and Device forms */
428
429typedef struct _EVENT_DATA_SCSI
430{
431 U8 TargetID; /* 00h */
432 U8 BusPort; /* 01h */
433 U16 Reserved; /* 02h */
434} EVENT_DATA_SCSI, MPI_POINTER PTR_EVENT_DATA_SCSI,
435 EventDataScsi_t, MPI_POINTER pEventDataScsi_t;
436
437/* SCSI Device Status Change Event data */
438
439typedef struct _EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE
440{
441 U8 TargetID; /* 00h */
442 U8 Bus; /* 01h */
443 U8 ReasonCode; /* 02h */
444 U8 LUN; /* 03h */
445 U8 ASC; /* 04h */
446 U8 ASCQ; /* 05h */
447 U16 Reserved; /* 06h */
448} EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE,
449 MPI_POINTER PTR_EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE,
450 MpiEventDataScsiDeviceStatusChange_t,
451 MPI_POINTER pMpiEventDataScsiDeviceStatusChange_t;
452
453/* MPI SCSI Device Status Change Event data ReasonCode values */
454#define MPI_EVENT_SCSI_DEV_STAT_RC_ADDED (0x03)
455#define MPI_EVENT_SCSI_DEV_STAT_RC_NOT_RESPONDING (0x04)
456#define MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA (0x05)
457
458/* SAS Device Status Change Event data */
459
460typedef struct _EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
461{
462 U8 TargetID; /* 00h */
463 U8 Bus; /* 01h */
464 U8 ReasonCode; /* 02h */
465 U8 Reserved; /* 03h */
466 U8 ASC; /* 04h */
467 U8 ASCQ; /* 05h */
468 U16 DevHandle; /* 06h */
469 U32 DeviceInfo; /* 08h */
470} EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
471 MPI_POINTER PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
472 MpiEventDataSasDeviceStatusChange_t,
473 MPI_POINTER pMpiEventDataSasDeviceStatusChange_t;
474
475/* MPI SAS Device Status Change Event data ReasonCode values */
476#define MPI_EVENT_SAS_DEV_STAT_RC_ADDED (0x03)
477#define MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING (0x04)
478#define MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
479#define MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED (0x06)
480
481/* SCSI Event data for Queue Full event */
482
483typedef struct _EVENT_DATA_QUEUE_FULL
484{
485 U8 TargetID; /* 00h */
486 U8 Bus; /* 01h */
487 U16 CurrentDepth; /* 02h */
488} EVENT_DATA_QUEUE_FULL, MPI_POINTER PTR_EVENT_DATA_QUEUE_FULL,
489 EventDataQueueFull_t, MPI_POINTER pEventDataQueueFull_t;
490
491/* MPI Link Status Change Event data */
492
493typedef struct _EVENT_DATA_LINK_STATUS
494{
495 U8 State; /* 00h */
496 U8 Reserved; /* 01h */
497 U16 Reserved1; /* 02h */
498 U8 Reserved2; /* 04h */
499 U8 Port; /* 05h */
500 U16 Reserved3; /* 06h */
501} EVENT_DATA_LINK_STATUS, MPI_POINTER PTR_EVENT_DATA_LINK_STATUS,
502 EventDataLinkStatus_t, MPI_POINTER pEventDataLinkStatus_t;
503
504#define MPI_EVENT_LINK_STATUS_FAILURE (0x00000000)
505#define MPI_EVENT_LINK_STATUS_ACTIVE (0x00000001)
506
507/* MPI Loop State Change Event data */
508
509typedef struct _EVENT_DATA_LOOP_STATE
510{
511 U8 Character4; /* 00h */
512 U8 Character3; /* 01h */
513 U8 Type; /* 02h */
514 U8 Reserved; /* 03h */
515 U8 Reserved1; /* 04h */
516 U8 Port; /* 05h */
517 U16 Reserved2; /* 06h */
518} EVENT_DATA_LOOP_STATE, MPI_POINTER PTR_EVENT_DATA_LOOP_STATE,
519 EventDataLoopState_t, MPI_POINTER pEventDataLoopState_t;
520
521#define MPI_EVENT_LOOP_STATE_CHANGE_LIP (0x0001)
522#define MPI_EVENT_LOOP_STATE_CHANGE_LPE (0x0002)
523#define MPI_EVENT_LOOP_STATE_CHANGE_LPB (0x0003)
524
525/* MPI LOGOUT Event data */
526
527typedef struct _EVENT_DATA_LOGOUT
528{
529 U32 NPortID; /* 00h */
530 U8 AliasIndex; /* 04h */
531 U8 Port; /* 05h */
532 U16 Reserved1; /* 06h */
533} EVENT_DATA_LOGOUT, MPI_POINTER PTR_EVENT_DATA_LOGOUT,
534 EventDataLogout_t, MPI_POINTER pEventDataLogout_t;
535
536#define MPI_EVENT_LOGOUT_ALL_ALIASES (0xFF)
537
538
539/* MPI Integrated RAID Event data */
540
541typedef struct _EVENT_DATA_RAID
542{
543 U8 VolumeID; /* 00h */
544 U8 VolumeBus; /* 01h */
545 U8 ReasonCode; /* 02h */
546 U8 PhysDiskNum; /* 03h */
547 U8 ASC; /* 04h */
548 U8 ASCQ; /* 05h */
549 U16 Reserved; /* 06h */
550 U32 SettingsStatus; /* 08h */
551} EVENT_DATA_RAID, MPI_POINTER PTR_EVENT_DATA_RAID,
552 MpiEventDataRaid_t, MPI_POINTER pMpiEventDataRaid_t;
553
554/* MPI Integrated RAID Event data ReasonCode values */
555#define MPI_EVENT_RAID_RC_VOLUME_CREATED (0x00)
556#define MPI_EVENT_RAID_RC_VOLUME_DELETED (0x01)
557#define MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED (0x02)
558#define MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED (0x03)
559#define MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED (0x04)
560#define MPI_EVENT_RAID_RC_PHYSDISK_CREATED (0x05)
561#define MPI_EVENT_RAID_RC_PHYSDISK_DELETED (0x06)
562#define MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED (0x07)
563#define MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED (0x08)
564#define MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED (0x09)
565#define MPI_EVENT_RAID_RC_SMART_DATA (0x0A)
566#define MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED (0x0B)
567
568
569/*****************************************************************************
570*
571* F i r m w a r e L o a d M e s s a g e s
572*
573*****************************************************************************/
574
575/****************************************************************************/
576/* Firmware Download message and associated structures */
577/****************************************************************************/
578
579typedef struct _MSG_FW_DOWNLOAD
580{
581 U8 ImageType; /* 00h */
582 U8 Reserved; /* 01h */
583 U8 ChainOffset; /* 02h */
584 U8 Function; /* 03h */
585 U8 Reserved1[3]; /* 04h */
586 U8 MsgFlags; /* 07h */
587 U32 MsgContext; /* 08h */
588 SGE_MPI_UNION SGL; /* 0Ch */
589} MSG_FW_DOWNLOAD, MPI_POINTER PTR_MSG_FW_DOWNLOAD,
590 FWDownload_t, MPI_POINTER pFWDownload_t;
591
592#define MPI_FW_DOWNLOAD_ITYPE_RESERVED (0x00)
593#define MPI_FW_DOWNLOAD_ITYPE_FW (0x01)
594#define MPI_FW_DOWNLOAD_ITYPE_BIOS (0x02)
595#define MPI_FW_DOWNLOAD_ITYPE_NVDATA (0x03)
596#define MPI_FW_DOWNLOAD_ITYPE_BOOTLOADER (0x04)
597
598
599typedef struct _FWDownloadTCSGE
600{
601 U8 Reserved; /* 00h */
602 U8 ContextSize; /* 01h */
603 U8 DetailsLength; /* 02h */
604 U8 Flags; /* 03h */
605 U32 Reserved_0100_Checksum; /* 04h */ /* obsolete Checksum */
606 U32 ImageOffset; /* 08h */
607 U32 ImageSize; /* 0Ch */
608} FW_DOWNLOAD_TCSGE, MPI_POINTER PTR_FW_DOWNLOAD_TCSGE,
609 FWDownloadTCSGE_t, MPI_POINTER pFWDownloadTCSGE_t;
610
611/* Firmware Download reply */
612typedef struct _MSG_FW_DOWNLOAD_REPLY
613{
614 U8 ImageType; /* 00h */
615 U8 Reserved; /* 01h */
616 U8 MsgLength; /* 02h */
617 U8 Function; /* 03h */
618 U8 Reserved1[3]; /* 04h */
619 U8 MsgFlags; /* 07h */
620 U32 MsgContext; /* 08h */
621 U16 Reserved2; /* 0Ch */
622 U16 IOCStatus; /* 0Eh */
623 U32 IOCLogInfo; /* 10h */
624} MSG_FW_DOWNLOAD_REPLY, MPI_POINTER PTR_MSG_FW_DOWNLOAD_REPLY,
625 FWDownloadReply_t, MPI_POINTER pFWDownloadReply_t;
626
627
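
A hedged sketch of how the pieces above might be laid out: the transaction-context SGE (FW_DOWNLOAD_TCSGE) is written at the start of the request's SGL area, and a data SGE describing the image buffer would follow it (omitted here). MPI_FUNCTION_FW_DOWNLOAD and MPI_SGE_FLAGS_TRANSACTION_ELEMENT are assumed to come from mpi.h; the helper is hypothetical.

#include <string.h>

/* Hypothetical helper: fixed part of a firmware download request. */
static void example_build_fw_download(FWDownload_t *req, U32 ctx, U32 image_size)
{
	FWDownloadTCSGE_t *tc = (FWDownloadTCSGE_t *)&req->SGL;

	memset(req, 0, sizeof(*req));
	req->ImageType  = MPI_FW_DOWNLOAD_ITYPE_FW;
	req->Function   = MPI_FUNCTION_FW_DOWNLOAD;            /* assumed, from mpi.h */
	req->MsgContext = ctx;

	tc->ContextSize   = 0;
	tc->DetailsLength = 12;                                 /* three U32s of details follow */
	tc->Flags         = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;  /* assumed, from mpi.h */
	tc->ImageOffset   = 0;
	tc->ImageSize     = image_size;
	/* ...a data SGE pointing at the image buffer would be appended here. */
}
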
628/****************************************************************************/
629/* Firmware Upload message and associated structures */
630/****************************************************************************/
631
632typedef struct _MSG_FW_UPLOAD
633{
634 U8 ImageType; /* 00h */
635 U8 Reserved; /* 01h */
636 U8 ChainOffset; /* 02h */
637 U8 Function; /* 03h */
638 U8 Reserved1[3]; /* 04h */
639 U8 MsgFlags; /* 07h */
640 U32 MsgContext; /* 08h */
641 SGE_MPI_UNION SGL; /* 0Ch */
642} MSG_FW_UPLOAD, MPI_POINTER PTR_MSG_FW_UPLOAD,
643 FWUpload_t, MPI_POINTER pFWUpload_t;
644
645#define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM (0x00)
646#define MPI_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
647#define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
648#define MPI_FW_UPLOAD_ITYPE_NVDATA (0x03)
649#define MPI_FW_UPLOAD_ITYPE_BOOTLOADER (0x04)
650
651typedef struct _FWUploadTCSGE
652{
653 U8 Reserved; /* 00h */
654 U8 ContextSize; /* 01h */
655 U8 DetailsLength; /* 02h */
656 U8 Flags; /* 03h */
657 U32 Reserved1; /* 04h */
658 U32 ImageOffset; /* 08h */
659 U32 ImageSize; /* 0Ch */
660} FW_UPLOAD_TCSGE, MPI_POINTER PTR_FW_UPLOAD_TCSGE,
661 FWUploadTCSGE_t, MPI_POINTER pFWUploadTCSGE_t;
662
663/* Firmware Upload reply */
664typedef struct _MSG_FW_UPLOAD_REPLY
665{
666 U8 ImageType; /* 00h */
667 U8 Reserved; /* 01h */
668 U8 MsgLength; /* 02h */
669 U8 Function; /* 03h */
670 U8 Reserved1[3]; /* 04h */
671 U8 MsgFlags; /* 07h */
672 U32 MsgContext; /* 08h */
673 U16 Reserved2; /* 0Ch */
674 U16 IOCStatus; /* 0Eh */
675 U32 IOCLogInfo; /* 10h */
676 U32 ActualImageSize; /* 14h */
677} MSG_FW_UPLOAD_REPLY, MPI_POINTER PTR_MSG_FW_UPLOAD_REPLY,
678 FWUploadReply_t, MPI_POINTER pFWUploadReply_t;
679
680
681typedef struct _MPI_FW_HEADER
682{
683 U32 ArmBranchInstruction0; /* 00h */
684 U32 Signature0; /* 04h */
685 U32 Signature1; /* 08h */
686 U32 Signature2; /* 0Ch */
687 U32 ArmBranchInstruction1; /* 10h */
688 U32 ArmBranchInstruction2; /* 14h */
689 U32 Reserved; /* 18h */
690 U32 Checksum; /* 1Ch */
691 U16 VendorId; /* 20h */
692 U16 ProductId; /* 22h */
693 MPI_FW_VERSION FWVersion; /* 24h */
694 U32 SeqCodeVersion; /* 28h */
695 U32 ImageSize; /* 2Ch */
696 U32 NextImageHeaderOffset; /* 30h */
697 U32 LoadStartAddress; /* 34h */
698 U32 IopResetVectorValue; /* 38h */
699 U32 IopResetRegAddr; /* 3Ch */
700 U32 VersionNameWhat; /* 40h */
701 U8 VersionName[32]; /* 44h */
702 U32 VendorNameWhat; /* 64h */
703 U8 VendorName[32]; /* 68h */
704} MPI_FW_HEADER, MPI_POINTER PTR_MPI_FW_HEADER,
705 MpiFwHeader_t, MPI_POINTER pMpiFwHeader_t;
706
707#define MPI_FW_HEADER_WHAT_SIGNATURE (0x29232840)
708
709/* defines for using the ProductId field */
710#define MPI_FW_HEADER_PID_TYPE_MASK (0xF000)
711#define MPI_FW_HEADER_PID_TYPE_SCSI (0x0000)
712#define MPI_FW_HEADER_PID_TYPE_FC (0x1000)
713#define MPI_FW_HEADER_PID_TYPE_SAS (0x2000)
714
715#define MPI_FW_HEADER_SIGNATURE_0 (0x5AEAA55A)
716#define MPI_FW_HEADER_SIGNATURE_1 (0xA55AEAA5)
717#define MPI_FW_HEADER_SIGNATURE_2 (0x5AA55AEA)
718
719#define MPI_FW_HEADER_PID_PROD_MASK (0x0F00)
720#define MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI (0x0100)
721#define MPI_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
722#define MPI_FW_HEADER_PID_PROD_TARGET_SCSI (0x0300)
723#define MPI_FW_HEADER_PID_PROD_IM_SCSI (0x0400)
724#define MPI_FW_HEADER_PID_PROD_IS_SCSI (0x0500)
725#define MPI_FW_HEADER_PID_PROD_CTX_SCSI (0x0600)
726
727#define MPI_FW_HEADER_PID_FAMILY_MASK (0x00FF)
728/* SCSI */
729#define MPI_FW_HEADER_PID_FAMILY_1030A0_SCSI (0x0001)
730#define MPI_FW_HEADER_PID_FAMILY_1030B0_SCSI (0x0002)
731#define MPI_FW_HEADER_PID_FAMILY_1030B1_SCSI (0x0003)
732#define MPI_FW_HEADER_PID_FAMILY_1030C0_SCSI (0x0004)
733#define MPI_FW_HEADER_PID_FAMILY_1020A0_SCSI (0x0005)
734#define MPI_FW_HEADER_PID_FAMILY_1020B0_SCSI (0x0006)
735#define MPI_FW_HEADER_PID_FAMILY_1020B1_SCSI (0x0007)
736#define MPI_FW_HEADER_PID_FAMILY_1020C0_SCSI (0x0008)
737#define MPI_FW_HEADER_PID_FAMILY_1035A0_SCSI (0x0009)
738#define MPI_FW_HEADER_PID_FAMILY_1035B0_SCSI (0x000A)
739#define MPI_FW_HEADER_PID_FAMILY_1030TA0_SCSI (0x000B)
740#define MPI_FW_HEADER_PID_FAMILY_1020TA0_SCSI (0x000C)
741/* Fibre Channel */
742#define MPI_FW_HEADER_PID_FAMILY_909_FC (0x0000)
743#define MPI_FW_HEADER_PID_FAMILY_919_FC (0x0001)
744#define MPI_FW_HEADER_PID_FAMILY_919X_FC (0x0002)
745#define MPI_FW_HEADER_PID_FAMILY_919XL_FC (0x0003)
746#define MPI_FW_HEADER_PID_FAMILY_949_FC (0x0004)
747#define MPI_FW_HEADER_PID_FAMILY_959_FC (0x0005)
748/* SAS */
749#define MPI_FW_HEADER_PID_FAMILY_1064_SAS (0x0001)
750
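
The signature and ProductId defines above are clearest with a short, hypothetical validation helper (format specifiers assume the usual mpi_type.h widths).

#include <stdio.h>

/* Hypothetical helper: sanity-check a firmware image header and report the
 * product type encoded in ProductId.  Returns 0 on success. */
static int example_check_fw_header(const MpiFwHeader_t *hdr)
{
	if (hdr->Signature0 != MPI_FW_HEADER_SIGNATURE_0 ||
	    hdr->Signature1 != MPI_FW_HEADER_SIGNATURE_1 ||
	    hdr->Signature2 != MPI_FW_HEADER_SIGNATURE_2)
		return -1;                                  /* not an MPI firmware image */

	switch (hdr->ProductId & MPI_FW_HEADER_PID_TYPE_MASK) {
	case MPI_FW_HEADER_PID_TYPE_SCSI: printf("SCSI firmware\n"); break;
	case MPI_FW_HEADER_PID_TYPE_FC:   printf("FC firmware\n");   break;
	case MPI_FW_HEADER_PID_TYPE_SAS:  printf("SAS firmware\n");  break;
	}
	printf("image size %u bytes, FW %u.%u.%u.%u\n", hdr->ImageSize,
	       hdr->FWVersion.Struct.Major, hdr->FWVersion.Struct.Minor,
	       hdr->FWVersion.Struct.Unit,  hdr->FWVersion.Struct.Dev);
	return 0;
}
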
751typedef struct _MPI_EXT_IMAGE_HEADER
752{
753 U8 ImageType; /* 00h */
754 U8 Reserved; /* 01h */
755 U16 Reserved1; /* 02h */
756 U32 Checksum; /* 04h */
757 U32 ImageSize; /* 08h */
758 U32 NextImageHeaderOffset; /* 0Ch */
759 U32 LoadStartAddress; /* 10h */
760 U32 Reserved2; /* 14h */
761} MPI_EXT_IMAGE_HEADER, MPI_POINTER PTR_MPI_EXT_IMAGE_HEADER,
762 MpiExtImageHeader_t, MPI_POINTER pMpiExtImageHeader_t;
763
764/* defines for the ImageType field */
765#define MPI_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
766#define MPI_EXT_IMAGE_TYPE_FW (0x01)
767#define MPI_EXT_IMAGE_TYPE_NVDATA (0x03)
768#define MPI_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
769
770#endif
diff --git a/drivers/message/fusion/lsi/mpi_lan.h b/drivers/message/fusion/lsi/mpi_lan.h
new file mode 100644
index 000000000000..3ced12784ee8
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_lan.h
@@ -0,0 +1,212 @@
1/*
2 * Copyright (c) 2000-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_lan.h
6 * Title: MPI LAN messages and structures
7 * Creation Date: June 30, 2000
8 *
9 * mpi_lan.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
17 * 05-24-00 00.10.02 Added LANStatus field to _MSG_LAN_SEND_REPLY.
18 * Added LANStatus field to _MSG_LAN_RECEIVE_POST_REPLY.
19 * Moved ListCount field in _MSG_LAN_RECEIVE_POST_REPLY.
20 * 06-06-00 01.00.01 Update version number for 1.0 release.
21 * 06-12-00 01.00.02 Added MPI_ to BUCKETSTATUS_ definitions.
22 * 06-22-00 01.00.03 Major changes to match new LAN definition in 1.0 spec.
23 * 06-30-00 01.00.04 Added Context Reply definitions per revised proposal.
24 * Changed transaction context usage to bucket/buffer.
25 * 07-05-00 01.00.05 Moved LAN_RECEIVE_POST_BUCKET_CONTEXT_MASK definition

26 * to lan private header file
27 * 11-02-00 01.01.01 Original release for post 1.0 work
28 * 02-20-01 01.01.02 Started using MPI_POINTER.
29 * 03-27-01 01.01.03 Added structure offset comments.
30 * 08-08-01 01.02.01 Original release for v1.2 work.
31 * --------------------------------------------------------------------------
32 */
33
34#ifndef MPI_LAN_H
35#define MPI_LAN_H
36
37
38/******************************************************************************
39*
40* L A N M e s s a g e s
41*
42*******************************************************************************/
43
44/* LANSend messages */
45
46typedef struct _MSG_LAN_SEND_REQUEST
47{
48 U16 Reserved; /* 00h */
49 U8 ChainOffset; /* 02h */
50 U8 Function; /* 03h */
51 U16 Reserved2; /* 04h */
52 U8 PortNumber; /* 06h */
53 U8 MsgFlags; /* 07h */
54 U32 MsgContext; /* 08h */
55 SGE_MPI_UNION SG_List[1]; /* 0Ch */
56} MSG_LAN_SEND_REQUEST, MPI_POINTER PTR_MSG_LAN_SEND_REQUEST,
57 LANSendRequest_t, MPI_POINTER pLANSendRequest_t;
58
59
60typedef struct _MSG_LAN_SEND_REPLY
61{
62 U16 Reserved; /* 00h */
63 U8 MsgLength; /* 02h */
64 U8 Function; /* 03h */
65 U8 Reserved2; /* 04h */
66 U8 NumberOfContexts; /* 05h */
67 U8 PortNumber; /* 06h */
68 U8 MsgFlags; /* 07h */
69 U32 MsgContext; /* 08h */
70 U16 Reserved3; /* 0Ch */
71 U16 IOCStatus; /* 0Eh */
72 U32 IOCLogInfo; /* 10h */
73 U32 BufferContext; /* 14h */
74} MSG_LAN_SEND_REPLY, MPI_POINTER PTR_MSG_LAN_SEND_REPLY,
75 LANSendReply_t, MPI_POINTER pLANSendReply_t;
76
77
78/* LANReceivePost */
79
80typedef struct _MSG_LAN_RECEIVE_POST_REQUEST
81{
82 U16 Reserved; /* 00h */
83 U8 ChainOffset; /* 02h */
84 U8 Function; /* 03h */
85 U16 Reserved2; /* 04h */
86 U8 PortNumber; /* 06h */
87 U8 MsgFlags; /* 07h */
88 U32 MsgContext; /* 08h */
89 U32 BucketCount; /* 0Ch */
90 SGE_MPI_UNION SG_List[1]; /* 10h */
91} MSG_LAN_RECEIVE_POST_REQUEST, MPI_POINTER PTR_MSG_LAN_RECEIVE_POST_REQUEST,
92 LANReceivePostRequest_t, MPI_POINTER pLANReceivePostRequest_t;
93
94
95typedef struct _MSG_LAN_RECEIVE_POST_REPLY
96{
97 U16 Reserved; /* 00h */
98 U8 MsgLength; /* 02h */
99 U8 Function; /* 03h */
100 U8 Reserved2; /* 04h */
101 U8 NumberOfContexts; /* 05h */
102 U8 PortNumber; /* 06h */
103 U8 MsgFlags; /* 07h */
104 U32 MsgContext; /* 08h */
105 U16 Reserved3; /* 0Ch */
106 U16 IOCStatus; /* 0Eh */
107 U32 IOCLogInfo; /* 10h */
108 U32 BucketsRemaining; /* 14h */
109 U32 PacketOffset; /* 18h */
110 U32 PacketLength; /* 1Ch */
111 U32 BucketContext[1]; /* 20h */
112} MSG_LAN_RECEIVE_POST_REPLY, MPI_POINTER PTR_MSG_LAN_RECEIVE_POST_REPLY,
113 LANReceivePostReply_t, MPI_POINTER pLANReceivePostReply_t;
114
115
116/* LANReset */
117
118typedef struct _MSG_LAN_RESET_REQUEST
119{
120 U16 Reserved; /* 00h */
121 U8 ChainOffset; /* 02h */
122 U8 Function; /* 03h */
123 U16 Reserved2; /* 04h */
124 U8 PortNumber; /* 06h */
125 U8 MsgFlags; /* 07h */
126 U32 MsgContext; /* 08h */
127} MSG_LAN_RESET_REQUEST, MPI_POINTER PTR_MSG_LAN_RESET_REQUEST,
128 LANResetRequest_t, MPI_POINTER pLANResetRequest_t;
129
130
131typedef struct _MSG_LAN_RESET_REPLY
132{
133 U16 Reserved; /* 00h */
134 U8 MsgLength; /* 02h */
135 U8 Function; /* 03h */
136 U16 Reserved2; /* 04h */
137 U8 PortNumber; /* 06h */
138 U8 MsgFlags; /* 07h */
139 U32 MsgContext; /* 08h */
140 U16 Reserved3; /* 0Ch */
141 U16 IOCStatus; /* 0Eh */
142 U32 IOCLogInfo; /* 10h */
143} MSG_LAN_RESET_REPLY, MPI_POINTER PTR_MSG_LAN_RESET_REPLY,
144 LANResetReply_t, MPI_POINTER pLANResetReply_t;
145
146
147/****************************************************************************/
148/* LAN Context Reply defines and macros */
149/****************************************************************************/
150
151#define LAN_REPLY_PACKET_LENGTH_MASK (0x0000FFFF)
152#define LAN_REPLY_PACKET_LENGTH_SHIFT (0)
153#define LAN_REPLY_BUCKET_CONTEXT_MASK (0x07FF0000)
154#define LAN_REPLY_BUCKET_CONTEXT_SHIFT (16)
155#define LAN_REPLY_BUFFER_CONTEXT_MASK (0x07FFFFFF)
156#define LAN_REPLY_BUFFER_CONTEXT_SHIFT (0)
157#define LAN_REPLY_FORM_MASK (0x18000000)
158#define LAN_REPLY_FORM_RECEIVE_SINGLE (0x00)
159#define LAN_REPLY_FORM_RECEIVE_MULTIPLE (0x01)
160#define LAN_REPLY_FORM_SEND_SINGLE (0x02)
161#define LAN_REPLY_FORM_MESSAGE_CONTEXT (0x03)
162#define LAN_REPLY_FORM_SHIFT (27)
163
164#define GET_LAN_PACKET_LENGTH(x) (((x) & LAN_REPLY_PACKET_LENGTH_MASK) \
165 >> LAN_REPLY_PACKET_LENGTH_SHIFT)
166
167#define SET_LAN_PACKET_LENGTH(x, lth) \
168 ((x) = ((x) & ~LAN_REPLY_PACKET_LENGTH_MASK) | \
169 (((lth) << LAN_REPLY_PACKET_LENGTH_SHIFT) & \
170 LAN_REPLY_PACKET_LENGTH_MASK))
171
172#define GET_LAN_BUCKET_CONTEXT(x) (((x) & LAN_REPLY_BUCKET_CONTEXT_MASK) \
173 >> LAN_REPLY_BUCKET_CONTEXT_SHIFT)
174
175#define SET_LAN_BUCKET_CONTEXT(x, ctx) \
176 ((x) = ((x) & ~LAN_REPLY_BUCKET_CONTEXT_MASK) | \
177 (((ctx) << LAN_REPLY_BUCKET_CONTEXT_SHIFT) & \
178 LAN_REPLY_BUCKET_CONTEXT_MASK))
179
180#define GET_LAN_BUFFER_CONTEXT(x) (((x) & LAN_REPLY_BUFFER_CONTEXT_MASK) \
181 >> LAN_REPLY_BUFFER_CONTEXT_SHIFT)
182
183#define SET_LAN_BUFFER_CONTEXT(x, ctx) \
184 ((x) = ((x) & ~LAN_REPLY_BUFFER_CONTEXT_MASK) | \
185 (((ctx) << LAN_REPLY_BUFFER_CONTEXT_SHIFT) & \
186 LAN_REPLY_BUFFER_CONTEXT_MASK))
187
188#define GET_LAN_FORM(x) (((x) & LAN_REPLY_FORM_MASK) \
189 >> LAN_REPLY_FORM_SHIFT)
190
191#define SET_LAN_FORM(x, frm) \
192 ((x) = ((x) & ~LAN_REPLY_FORM_MASK) | \
193 (((frm) << LAN_REPLY_FORM_SHIFT) & \
194 LAN_REPLY_FORM_MASK))
195
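
For illustration, a hypothetical helper showing the GET_ macros above applied to one BucketContext word from a LANReceivePostReply.

#include <stdio.h>

/* Hypothetical helper: decode one LAN context reply word. */
static void example_decode_lan_context(U32 word)
{
	switch (GET_LAN_FORM(word)) {
	case LAN_REPLY_FORM_RECEIVE_SINGLE:
		printf("bucket %u holds a %u byte packet\n",
		       GET_LAN_BUCKET_CONTEXT(word), GET_LAN_PACKET_LENGTH(word));
		break;
	case LAN_REPLY_FORM_SEND_SINGLE:
		printf("send completion, buffer context %u\n",
		       GET_LAN_BUFFER_CONTEXT(word));
		break;
	default:
		break;
	}
}
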
196
197/****************************************************************************/
198/* LAN Current Device State defines */
199/****************************************************************************/
200
201#define MPI_LAN_DEVICE_STATE_RESET (0x00)
202#define MPI_LAN_DEVICE_STATE_OPERATIONAL (0x01)
203
204
205/****************************************************************************/
206/* LAN Loopback defines */
207/****************************************************************************/
208
209#define MPI_LAN_TX_MODES_ENABLE_LOOPBACK_SUPPRESSION (0x01)
210
211#endif
212
diff --git a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h
new file mode 100644
index 000000000000..9580a9de7fd2
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_raid.h
@@ -0,0 +1,232 @@
1/*
2 * Copyright (c) 2001-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_raid.h
6 * Title: MPI RAID message and structures
7 * Creation Date: February 27, 2001
8 *
9 * mpi_raid.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 02-27-01 01.01.01 Original release for this file.
17 * 03-27-01 01.01.02 Added structure offset comments.
18 * 08-08-01 01.02.01 Original release for v1.2 work.
19 * 09-28-01 01.02.02 Major rework for MPI v1.2 Integrated RAID changes.
20 * 10-04-01 01.02.03 Added ActionData defines for
21 * MPI_RAID_ACTION_DELETE_VOLUME action.
22 * 11-01-01 01.02.04 Added define for MPI_RAID_ACTION_ADATA_DO_NOT_SYNC.
23 * 03-14-02 01.02.05 Added define for MPI_RAID_ACTION_ADATA_LOW_LEVEL_INIT.
24 * 05-07-02 01.02.06 Added define for MPI_RAID_ACTION_ACTIVATE_VOLUME,
25 * MPI_RAID_ACTION_INACTIVATE_VOLUME, and
26 * MPI_RAID_ACTION_ADATA_INACTIVATE_ALL.
27 * 07-12-02 01.02.07 Added structures for Mailbox request and reply.
28 * 11-15-02 01.02.08 Added missing MsgContext field to MSG_MAILBOX_REQUEST.
29 * 04-01-03 01.02.09 New action data option flag for
30 * MPI_RAID_ACTION_DELETE_VOLUME.
31 * --------------------------------------------------------------------------
32 */
33
34#ifndef MPI_RAID_H
35#define MPI_RAID_H
36
37
38/******************************************************************************
39*
40* R A I D M e s s a g e s
41*
42*******************************************************************************/
43
44
45/****************************************************************************/
46/* RAID Action Request */
47/****************************************************************************/
48
49typedef struct _MSG_RAID_ACTION
50{
51 U8 Action; /* 00h */
52 U8 Reserved1; /* 01h */
53 U8 ChainOffset; /* 02h */
54 U8 Function; /* 03h */
55 U8 VolumeID; /* 04h */
56 U8 VolumeBus; /* 05h */
57 U8 PhysDiskNum; /* 06h */
58 U8 MsgFlags; /* 07h */
59 U32 MsgContext; /* 08h */
60 U32 Reserved2; /* 0Ch */
61 U32 ActionDataWord; /* 10h */
62 SGE_SIMPLE_UNION ActionDataSGE; /* 14h */
63} MSG_RAID_ACTION_REQUEST, MPI_POINTER PTR_MSG_RAID_ACTION_REQUEST,
64 MpiRaidActionRequest_t , MPI_POINTER pMpiRaidActionRequest_t;
65
66
67/* RAID Action request Action values */
68
69#define MPI_RAID_ACTION_STATUS (0x00)
70#define MPI_RAID_ACTION_INDICATOR_STRUCT (0x01)
71#define MPI_RAID_ACTION_CREATE_VOLUME (0x02)
72#define MPI_RAID_ACTION_DELETE_VOLUME (0x03)
73#define MPI_RAID_ACTION_DISABLE_VOLUME (0x04)
74#define MPI_RAID_ACTION_ENABLE_VOLUME (0x05)
75#define MPI_RAID_ACTION_QUIESCE_PHYS_IO (0x06)
76#define MPI_RAID_ACTION_ENABLE_PHYS_IO (0x07)
77#define MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS (0x08)
78#define MPI_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
79#define MPI_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
80#define MPI_RAID_ACTION_CHANGE_PHYSDISK_SETTINGS (0x0C)
81#define MPI_RAID_ACTION_CREATE_PHYSDISK (0x0D)
82#define MPI_RAID_ACTION_DELETE_PHYSDISK (0x0E)
83#define MPI_RAID_ACTION_FAIL_PHYSDISK (0x0F)
84#define MPI_RAID_ACTION_REPLACE_PHYSDISK (0x10)
85#define MPI_RAID_ACTION_ACTIVATE_VOLUME (0x11)
86#define MPI_RAID_ACTION_INACTIVATE_VOLUME (0x12)
87
88/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */
89#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC (0x00000001)
90#define MPI_RAID_ACTION_ADATA_LOW_LEVEL_INIT (0x00000002)
91
92/* ActionDataWord defines for use with MPI_RAID_ACTION_DELETE_VOLUME action */
93#define MPI_RAID_ACTION_ADATA_KEEP_PHYS_DISKS (0x00000000)
94#define MPI_RAID_ACTION_ADATA_DEL_PHYS_DISKS (0x00000001)
95
96#define MPI_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
97#define MPI_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000002)
98
99/* ActionDataWord defines for use with MPI_RAID_ACTION_ACTIVATE_VOLUME action */
100#define MPI_RAID_ACTION_ADATA_INACTIVATE_ALL (0x00000001)
101
102
103/* RAID Action reply message */
104
105typedef struct _MSG_RAID_ACTION_REPLY
106{
107 U8 Action; /* 00h */
108 U8 Reserved; /* 01h */
109 U8 MsgLength; /* 02h */
110 U8 Function; /* 03h */
111 U8 VolumeID; /* 04h */
112 U8 VolumeBus; /* 05h */
113 U8 PhysDiskNum; /* 06h */
114 U8 MsgFlags; /* 07h */
115 U32 MsgContext; /* 08h */
116 U16 ActionStatus; /* 0Ch */
117 U16 IOCStatus; /* 0Eh */
118 U32 IOCLogInfo; /* 10h */
119 U32 VolumeStatus; /* 14h */
120 U32 ActionData; /* 18h */
121} MSG_RAID_ACTION_REPLY, MPI_POINTER PTR_MSG_RAID_ACTION_REPLY,
122 MpiRaidActionReply_t, MPI_POINTER pMpiRaidActionReply_t;
123
124
125/* RAID Volume reply ActionStatus values */
126
127#define MPI_RAID_ACTION_ASTATUS_SUCCESS (0x0000)
128#define MPI_RAID_ACTION_ASTATUS_INVALID_ACTION (0x0001)
129#define MPI_RAID_ACTION_ASTATUS_FAILURE (0x0002)
130#define MPI_RAID_ACTION_ASTATUS_IN_PROGRESS (0x0003)
131
132
133/* RAID Volume reply RAID Volume Indicator structure */
134
135typedef struct _MPI_RAID_VOL_INDICATOR
136{
137 U64 TotalBlocks; /* 00h */
138 U64 BlocksRemaining; /* 08h */
139} MPI_RAID_VOL_INDICATOR, MPI_POINTER PTR_MPI_RAID_VOL_INDICATOR,
140 MpiRaidVolIndicator_t, MPI_POINTER pMpiRaidVolIndicator_t;
141
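
The indicator structure above lends itself to a simple progress calculation; the helper below is a hypothetical sketch only.

/* Hypothetical helper: rough percent-complete from a RAID volume indicator. */
static unsigned int example_percent_complete(const MpiRaidVolIndicator_t *ind)
{
	if (ind->TotalBlocks == 0)
		return 0;
	return (unsigned int)(((ind->TotalBlocks - ind->BlocksRemaining) * 100) /
			      ind->TotalBlocks);
}
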
142
143/****************************************************************************/
144/* SCSI IO RAID Passthrough Request */
145/****************************************************************************/
146
147typedef struct _MSG_SCSI_IO_RAID_PT_REQUEST
148{
149 U8 PhysDiskNum; /* 00h */
150 U8 Reserved1; /* 01h */
151 U8 ChainOffset; /* 02h */
152 U8 Function; /* 03h */
153 U8 CDBLength; /* 04h */
154 U8 SenseBufferLength; /* 05h */
155 U8 Reserved2; /* 06h */
156 U8 MsgFlags; /* 07h */
157 U32 MsgContext; /* 08h */
158 U8 LUN[8]; /* 0Ch */
159 U32 Control; /* 14h */
160 U8 CDB[16]; /* 18h */
161 U32 DataLength; /* 28h */
162 U32 SenseBufferLowAddr; /* 2Ch */
163 SGE_IO_UNION SGL; /* 30h */
164} MSG_SCSI_IO_RAID_PT_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REQUEST,
165 SCSIIORaidPassthroughRequest_t, MPI_POINTER pSCSIIORaidPassthroughRequest_t;
166
167
168/* SCSI IO RAID Passthrough reply structure */
169
170typedef struct _MSG_SCSI_IO_RAID_PT_REPLY
171{
172 U8 PhysDiskNum; /* 00h */
173 U8 Reserved1; /* 01h */
174 U8 MsgLength; /* 02h */
175 U8 Function; /* 03h */
176 U8 CDBLength; /* 04h */
177 U8 SenseBufferLength; /* 05h */
178 U8 Reserved2; /* 06h */
179 U8 MsgFlags; /* 07h */
180 U32 MsgContext; /* 08h */
181 U8 SCSIStatus; /* 0Ch */
182 U8 SCSIState; /* 0Dh */
183 U16 IOCStatus; /* 0Eh */
184 U32 IOCLogInfo; /* 10h */
185 U32 TransferCount; /* 14h */
186 U32 SenseCount; /* 18h */
187 U32 ResponseInfo; /* 1Ch */
188} MSG_SCSI_IO_RAID_PT_REPLY, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REPLY,
189 SCSIIORaidPassthroughReply_t, MPI_POINTER pSCSIIORaidPassthroughReply_t;
190
191
192/****************************************************************************/
193/* Mailbox request structure */
194/****************************************************************************/
195
196typedef struct _MSG_MAILBOX_REQUEST
197{
198 U16 Reserved1; /* 00h */
199 U8 ChainOffset; /* 02h */
200 U8 Function; /* 03h */
201 U16 Reserved2; /* 04h */
202 U8 Reserved3; /* 06h */
203 U8 MsgFlags; /* 07h */
204 U32 MsgContext; /* 08h */
205 U8 Command[10]; /* 0Ch */
206 U16 Reserved4; /* 16h */
207 SGE_IO_UNION SGL; /* 18h */
208} MSG_MAILBOX_REQUEST, MPI_POINTER PTR_MSG_MAILBOX_REQUEST,
209 MailboxRequest_t, MPI_POINTER pMailboxRequest_t;
210
211
212/* Mailbox reply structure */
213typedef struct _MSG_MAILBOX_REPLY
214{
215 U16 Reserved1; /* 00h */
216 U8 MsgLength; /* 02h */
217 U8 Function; /* 03h */
218 U16 Reserved2; /* 04h */
219 U8 Reserved3; /* 06h */
220 U8 MsgFlags; /* 07h */
221 U32 MsgContext; /* 08h */
222 U16 MailboxStatus; /* 0Ch */
223 U16 IOCStatus; /* 0Eh */
224 U32 IOCLogInfo; /* 10h */
225 U32 Reserved4; /* 14h */
226} MSG_MAILBOX_REPLY, MPI_POINTER PTR_MSG_MAILBOX_REPLY,
227 MailboxReply_t, MPI_POINTER pMailboxReply_t;
228
229#endif
230
231
232
diff --git a/drivers/message/fusion/lsi/mpi_sas.h b/drivers/message/fusion/lsi/mpi_sas.h
new file mode 100644
index 000000000000..cb878f9c65de
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_sas.h
@@ -0,0 +1,181 @@
1/*
2 * Copyright (c) 2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_sas.h
6 * Title: MPI Serial Attached SCSI structures and definitions
7 * Creation Date: April 23, 2003
8 *
9 * mpi_sas.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * xx-yy-zz 01.05.01 Original release.
17 * --------------------------------------------------------------------------
18 */
19
20#ifndef MPI_SAS_H
21#define MPI_SAS_H
22
23/*****************************************************************************
24*
25* S e r i a l A t t a c h e d S C S I M e s s a g e s
26*
27*****************************************************************************/
28
29/****************************************************************************/
30/* Serial Management Protocol Passthrough Request */
31/****************************************************************************/
32
33typedef struct _MSG_SMP_PASSTHROUGH_REQUEST
34{
35 U8 PassthroughFlags; /* 00h */
36 U8 PhysicalPort; /* 01h */
37 U8 ChainOffset; /* 02h */
38 U8 Function; /* 03h */
39 U16 RequestDataLength; /* 04h */
40 U8 ConnectionRate; /* 06h */
41 U8 MsgFlags; /* 07h */
42 U32 MsgContext; /* 08h */
43 U32 Reserved1; /* 0Ch */
44 U64 SASAddress; /* 10h */
45 U32 Reserved2; /* 18h */
46 U32 Reserved3; /* 1Ch */
47 SGE_SIMPLE_UNION SGL; /* 20h */
48} MSG_SMP_PASSTHROUGH_REQUEST, MPI_POINTER PTR_MSG_SMP_PASSTHROUGH_REQUEST,
49 SmpPassthroughRequest_t, MPI_POINTER pSmpPassthroughRequest_t;
50
51#define MPI_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
52
53#define MPI_SMP_PT_REQ_CONNECT_RATE_NEGOTIATED (0x00)
54#define MPI_SMP_PT_REQ_CONNECT_RATE_1_5 (0x08)
55#define MPI_SMP_PT_REQ_CONNECT_RATE_3_0 (0x09)
56
57
58/* Serial Management Protocol Passthrough Reply */
59typedef struct _MSG_SMP_PASSTHROUGH_REPLY
60{
61 U8 PassthroughFlags; /* 00h */
62 U8 PhysicalPort; /* 01h */
63 U8 MsgLength; /* 02h */
64 U8 Function; /* 03h */
65 U16 ResponseDataLength; /* 04h */
66 U8 Reserved1; /* 06h */
67 U8 MsgFlags; /* 07h */
68 U32 MsgContext; /* 08h */
69 U8 Reserved2; /* 0Ch */
70 U8 SASStatus; /* 0Dh */
71 U16 IOCStatus; /* 0Eh */
72 U32 IOCLogInfo; /* 10h */
73 U32 Reserved3; /* 14h */
74 U8 ResponseData[4]; /* 18h */
75} MSG_SMP_PASSTHROUGH_REPLY, MPI_POINTER PTR_MSG_SMP_PASSTHROUGH_REPLY,
76 SmpPassthroughReply_t, MPI_POINTER pSmpPassthroughReply_t;
77
78#define MPI_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
79
80/* values for the SASStatus field */
81#define MPI_SASSTATUS_SUCCESS (0x00)
82#define MPI_SASSTATUS_UNKNOWN_ERROR (0x01)
83#define MPI_SASSTATUS_INVALID_FRAME (0x02)
84#define MPI_SASSTATUS_UTC_BAD_DEST (0x03)
85#define MPI_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
86#define MPI_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
87#define MPI_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
88#define MPI_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
89#define MPI_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
90#define MPI_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
91#define MPI_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
92#define MPI_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
93#define MPI_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
94#define MPI_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
95#define MPI_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
96#define MPI_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
97#define MPI_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
98#define MPI_SASSTATUS_DATA_OFFSET_ERROR (0x11)
99#define MPI_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
100#define MPI_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
101#define MPI_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
102
103
104/*
105 * Values for the SAS DeviceInfo field used in SAS Device Status Change Event
106 * data and SAS IO Unit Configuration pages.
107 */
108#define MPI_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
109#define MPI_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
110#define MPI_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
111#define MPI_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
112#define MPI_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
113#define MPI_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
114#define MPI_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
115#define MPI_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
116#define MPI_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
117#define MPI_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
118#define MPI_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
119
120#define MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
121#define MPI_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
122#define MPI_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
123#define MPI_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
124#define MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
125
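
For illustration, a hypothetical helper that summarizes a SAS DeviceInfo word using the masks and bits above.

#include <stdio.h>

/* Hypothetical helper: summarize a SAS DeviceInfo word. */
static void example_dump_sas_device_info(U32 info)
{
	switch (info & MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
	case MPI_SAS_DEVICE_INFO_NO_DEVICE:       printf("no device\n");     return;
	case MPI_SAS_DEVICE_INFO_END_DEVICE:      printf("end device");      break;
	case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:   printf("edge expander");   break;
	case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER: printf("fanout expander"); break;
	}
	if (info & MPI_SAS_DEVICE_INFO_SSP_TARGET)
		printf(", SSP target");
	if (info & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
		printf(", SATA device");
	printf("\n");
}
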
126
127/****************************************************************************/
128/* SAS IO Unit Control Request */
129/****************************************************************************/
130
131typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
132{
133 U8 Operation; /* 00h */
134 U8 Reserved1; /* 01h */
135 U8 ChainOffset; /* 02h */
136 U8 Function; /* 03h */
137 U16 Reserved2; /* 04h */
138 U8 Reserved3; /* 06h */
139 U8 MsgFlags; /* 07h */
140 U32 MsgContext; /* 08h */
141 U8 TargetID; /* 0Ch */
142 U8 Bus; /* 0Dh */
143 U8 PhyNum; /* 0Eh */
144 U8 Reserved4; /* 0Fh */
145 U32 Reserved5; /* 10h */
146 U64 SASAddress; /* 14h */
147 U32 Reserved6; /* 1Ch */
148} MSG_SAS_IOUNIT_CONTROL_REQUEST, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REQUEST,
149 SasIoUnitControlRequest_t, MPI_POINTER pSasIoUnitControlRequest_t;
150
151/* values for the Operation field */
152#define MPI_SAS_OP_CLEAR_NOT_PRESENT (0x01)
153#define MPI_SAS_OP_CLEAR_ALL (0x02)
154#define MPI_SAS_OP_MAP (0x03)
155#define MPI_SAS_OP_MOVE (0x04)
156#define MPI_SAS_OP_CLEAR (0x05)
157#define MPI_SAS_OP_PHY_LINK_RESET (0x06)
158#define MPI_SAS_OP_PHY_HARD_RESET (0x07)
159#define MPI_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
160
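
As a hedged sketch of one of the PHY-scoped operations above: requesting a link reset on a single PHY. The helper is hypothetical, and MPI_FUNCTION_SAS_IO_UNIT_CONTROL is assumed to be the corresponding function code in mpi.h.

#include <string.h>

/* Hypothetical helper: request a link reset on one PHY. */
static void example_build_phy_link_reset(SasIoUnitControlRequest_t *req,
					 U8 phy_num, U32 ctx)
{
	memset(req, 0, sizeof(*req));
	req->Operation  = MPI_SAS_OP_PHY_LINK_RESET;
	req->Function   = MPI_FUNCTION_SAS_IO_UNIT_CONTROL; /* assumed, from mpi.h */
	req->PhyNum     = phy_num;
	req->MsgContext = ctx;
	/* TargetID/Bus/SASAddress are left zero in this sketch. */
}
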
161
162/* SAS IO Unit Control Reply */
163typedef struct _MSG_SAS_IOUNIT_CONTROL_REPLY
164{
165 U8 Operation; /* 00h */
166 U8 Reserved1; /* 01h */
167 U8 MsgLength; /* 02h */
168 U8 Function; /* 03h */
169 U16 Reserved2; /* 04h */
170 U8 Reserved3; /* 06h */
171 U8 MsgFlags; /* 07h */
172 U32 MsgContext; /* 08h */
173 U16 Reserved4; /* 0Ch */
174 U16 IOCStatus; /* 0Eh */
175 U32 IOCLogInfo; /* 10h */
176} MSG_SAS_IOUNIT_CONTROL_REPLY, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REPLY,
177 SasIoUnitControlReply_t, MPI_POINTER pSasIoUnitControlReply_t;
178
179#endif
180
181
diff --git a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h
new file mode 100644
index 000000000000..804dc85426c1
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_targ.h
@@ -0,0 +1,435 @@
1/*
2 * Copyright (c) 2000-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_targ.h
6 * Title: MPI Target mode messages and structures
7 * Creation Date: June 22, 2000
8 *
9 * mpi_targ.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
17 * 06-06-00 01.00.01 Update version number for 1.0 release.
18 * 06-22-00 01.00.02 Added _MSG_TARGET_CMD_BUFFER_POST_REPLY structure.
19 * Corrected DECSRIPTOR typo to DESCRIPTOR.
20 * 11-02-00 01.01.01 Original release for post 1.0 work
21 * Modified target mode to use IoIndex instead of
22 * HostIndex and IocIndex. Added Alias.
23 * 01-09-01 01.01.02 Added defines for TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER
24 * and TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER.
25 * 02-20-01 01.01.03 Started using MPI_POINTER.
26 * Added structures for MPI_TARGET_SCSI_SPI_CMD_BUFFER and
27 * MPI_TARGET_FCP_CMD_BUFFER.
28 * 03-27-01 01.01.04 Added structure offset comments.
29 * 08-08-01 01.02.01 Original release for v1.2 work.
30 * 09-28-01 01.02.02 Added structure for MPI_TARGET_SCSI_SPI_STATUS_IU.
31 * Added PriorityReason field to some replies and
32 * defined more PriorityReason codes.
33 * Added some defines to support the previous version
34 * of MPI.
35 * 10-04-01 01.02.03 Added PriorityReason to MSG_TARGET_ERROR_REPLY.
36 * 11-01-01 01.02.04 Added define for TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY.
37 * 03-14-02 01.02.05 Modified MPI_TARGET_FCP_RSP_BUFFER to get the proper
38 * byte ordering.
39 * 05-31-02 01.02.06 Modified TARGET_MODE_REPLY_ALIAS_MASK to only include
40 * one bit.
41 * Added AliasIndex field to MPI_TARGET_FCP_CMD_BUFFER.
42 * 09-16-02 01.02.07 Added flags for confirmed completion.
43 * Added PRIORITY_REASON_TARGET_BUSY.
44 * 11-15-02 01.02.08 Added AliasID field to MPI_TARGET_SCSI_SPI_CMD_BUFFER.
45 * 04-01-03 01.02.09 Added OptionalOxid field to MPI_TARGET_FCP_CMD_BUFFER.
46 * --------------------------------------------------------------------------
47 */
48
49#ifndef MPI_TARG_H
50#define MPI_TARG_H
51
52
53/******************************************************************************
54*
55* S C S I T a r g e t M e s s a g e s
56*
57*******************************************************************************/
58
59typedef struct _CMD_BUFFER_DESCRIPTOR
60{
61 U16 IoIndex; /* 00h */
62 U16 Reserved; /* 02h */
63 union /* 04h */
64 {
65 U32 PhysicalAddress32;
66 U64 PhysicalAddress64;
67 } u;
68} CMD_BUFFER_DESCRIPTOR, MPI_POINTER PTR_CMD_BUFFER_DESCRIPTOR,
69 CmdBufferDescriptor_t, MPI_POINTER pCmdBufferDescriptor_t;
70
71
72/****************************************************************************/
73/* Target Command Buffer Post Request */
74/****************************************************************************/
75
76typedef struct _MSG_TARGET_CMD_BUFFER_POST_REQUEST
77{
78 U8 BufferPostFlags; /* 00h */
79 U8 BufferCount; /* 01h */
80 U8 ChainOffset; /* 02h */
81 U8 Function; /* 03h */
82 U8 BufferLength; /* 04h */
83 U8 Reserved; /* 05h */
84 U8 Reserved1; /* 06h */
85 U8 MsgFlags; /* 07h */
86 U32 MsgContext; /* 08h */
87 CMD_BUFFER_DESCRIPTOR Buffer[1]; /* 0Ch */
88} MSG_TARGET_CMD_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST,
89 TargetCmdBufferPostRequest_t, MPI_POINTER pTargetCmdBufferPostRequest_t;
90
91#define CMD_BUFFER_POST_FLAGS_PORT_MASK (0x01)
92#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_MASK (0x80)
93#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_32 (0)
94#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_64 (1)
95#define CMD_BUFFER_POST_FLAGS_64_BIT_ADDR (0x80)
96
97#define CMD_BUFFER_POST_IO_INDEX_MASK (0x00003FFF)
98#define CMD_BUFFER_POST_IO_INDEX_MASK_0100 (0x000003FF) /* obsolete */
99
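
A hypothetical sketch of posting a single 64-bit addressed command buffer using the flags above. MPI_FUNCTION_TARGET_CMD_BUFFER_POST is assumed to be the corresponding function code from mpi.h, and the buffer's DMA address and IoIndex are supplied by the caller.

#include <string.h>

/* Hypothetical helper: post one command buffer of 'buf_len' bytes at DMA
 * address 'dma', identified by 'io_index'. */
static void example_post_cmd_buffer(TargetCmdBufferPostRequest_t *req,
				    U16 io_index, U64 dma, U8 buf_len, U32 ctx)
{
	memset(req, 0, sizeof(*req));
	req->BufferPostFlags = CMD_BUFFER_POST_FLAGS_64_BIT_ADDR;
	req->BufferCount     = 1;
	req->Function        = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; /* assumed, from mpi.h */
	req->BufferLength    = buf_len;
	req->MsgContext      = ctx;
	req->Buffer[0].IoIndex             = io_index;
	req->Buffer[0].u.PhysicalAddress64 = dma;
}
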
100
101typedef struct _MSG_TARGET_CMD_BUFFER_POST_REPLY
102{
103 U8 BufferPostFlags; /* 00h */
104 U8 BufferCount; /* 01h */
105 U8 MsgLength; /* 02h */
106 U8 Function; /* 03h */
107 U8 BufferLength; /* 04h */
108 U8 Reserved; /* 05h */
109 U8 Reserved1; /* 06h */
110 U8 MsgFlags; /* 07h */
111 U32 MsgContext; /* 08h */
112 U16 Reserved2; /* 0Ch */
113 U16 IOCStatus; /* 0Eh */
114 U32 IOCLogInfo; /* 10h */
115} MSG_TARGET_CMD_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_REPLY,
116 TargetCmdBufferPostReply_t, MPI_POINTER pTargetCmdBufferPostReply_t;
117
118/* the following structure is obsolete as of MPI v1.2 */
119typedef struct _MSG_PRIORITY_CMD_RECEIVED_REPLY
120{
121 U16 Reserved; /* 00h */
122 U8 MsgLength; /* 02h */
123 U8 Function; /* 03h */
124 U16 Reserved1; /* 04h */
125 U8 Reserved2; /* 06h */
126 U8 MsgFlags; /* 07h */
127 U32 MsgContext; /* 08h */
128 U8 PriorityReason; /* 0Ch */
129 U8 Reserved3; /* 0Dh */
130 U16 IOCStatus; /* 0Eh */
131 U32 IOCLogInfo; /* 10h */
132 U32 ReplyWord; /* 14h */
133} MSG_PRIORITY_CMD_RECEIVED_REPLY, MPI_POINTER PTR_MSG_PRIORITY_CMD_RECEIVED_REPLY,
134 PriorityCommandReceivedReply_t, MPI_POINTER pPriorityCommandReceivedReply_t;
135
136#define PRIORITY_REASON_NO_DISCONNECT (0x00)
137#define PRIORITY_REASON_SCSI_TASK_MANAGEMENT (0x01)
138#define PRIORITY_REASON_CMD_PARITY_ERR (0x02)
139#define PRIORITY_REASON_MSG_OUT_PARITY_ERR (0x03)
140#define PRIORITY_REASON_LQ_CRC_ERR (0x04)
141#define PRIORITY_REASON_CMD_CRC_ERR (0x05)
142#define PRIORITY_REASON_PROTOCOL_ERR (0x06)
143#define PRIORITY_REASON_DATA_OUT_PARITY_ERR (0x07)
144#define PRIORITY_REASON_DATA_OUT_CRC_ERR (0x08)
145#define PRIORITY_REASON_TARGET_BUSY (0x09)
146#define PRIORITY_REASON_UNKNOWN (0xFF)
147
148
149typedef struct _MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY
150{
151 U16 Reserved; /* 00h */
152 U8 MsgLength; /* 02h */
153 U8 Function; /* 03h */
154 U16 Reserved1; /* 04h */
155 U8 Reserved2; /* 06h */
156 U8 MsgFlags; /* 07h */
157 U32 MsgContext; /* 08h */
158 U8 PriorityReason; /* 0Ch */
159 U8 Reserved3; /* 0Dh */
160 U16 IOCStatus; /* 0Eh */
161 U32 IOCLogInfo; /* 10h */
162 U32 ReplyWord; /* 14h */
163} MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY,
164 MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY,
165 TargetCmdBufferPostErrorReply_t, MPI_POINTER pTargetCmdBufferPostErrorReply_t;
166
167
168typedef struct _MPI_TARGET_FCP_CMD_BUFFER
169{
170 U8 FcpLun[8]; /* 00h */
171 U8 FcpCntl[4]; /* 08h */
172 U8 FcpCdb[16]; /* 0Ch */
173 U32 FcpDl; /* 1Ch */
174 U8 AliasIndex; /* 20h */
175 U8 Reserved1; /* 21h */
176 U16 OptionalOxid; /* 22h */
177} MPI_TARGET_FCP_CMD_BUFFER, MPI_POINTER PTR_MPI_TARGET_FCP_CMD_BUFFER,
178 MpiTargetFcpCmdBuffer, MPI_POINTER pMpiTargetFcpCmdBuffer;
179
180
181typedef struct _MPI_TARGET_SCSI_SPI_CMD_BUFFER
182{
183 /* SPI L_Q information unit */
184 U8 L_QType; /* 00h */
185 U8 Reserved; /* 01h */
186 U16 Tag; /* 02h */
187 U8 LogicalUnitNumber[8]; /* 04h */
188 U32 DataLength; /* 0Ch */
189 /* SPI command information unit */
190 U8 ReservedFirstByteOfCommandIU; /* 10h */
191 U8 TaskAttribute; /* 11h */
192 U8 TaskManagementFlags; /* 12h */
193 U8 AdditionalCDBLength; /* 13h */
194 U8 CDB[16]; /* 14h */
195 /* Alias ID */
196 U8 AliasID; /* 24h */
197 U8 Reserved1; /* 25h */
198 U16 Reserved2; /* 26h */
199} MPI_TARGET_SCSI_SPI_CMD_BUFFER,
200 MPI_POINTER PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER,
201 MpiTargetScsiSpiCmdBuffer, MPI_POINTER pMpiTargetScsiSpiCmdBuffer;
202
203
204/****************************************************************************/
205/* Target Assist Request */
206/****************************************************************************/
207
208typedef struct _MSG_TARGET_ASSIST_REQUEST
209{
210 U8 StatusCode; /* 00h */
211 U8 TargetAssistFlags; /* 01h */
212 U8 ChainOffset; /* 02h */
213 U8 Function; /* 03h */
214 U16 QueueTag; /* 04h */
215 U8 Reserved; /* 06h */
216 U8 MsgFlags; /* 07h */
217 U32 MsgContext; /* 08h */
218 U32 ReplyWord; /* 0Ch */
219 U8 LUN[8]; /* 10h */
220 U32 RelativeOffset; /* 18h */
221 U32 DataLength; /* 1Ch */
222 SGE_IO_UNION SGL[1]; /* 20h */
223} MSG_TARGET_ASSIST_REQUEST, MPI_POINTER PTR_MSG_TARGET_ASSIST_REQUEST,
224 TargetAssistRequest_t, MPI_POINTER pTargetAssistRequest_t;
225
226#define TARGET_ASSIST_FLAGS_DATA_DIRECTION (0x01)
227#define TARGET_ASSIST_FLAGS_AUTO_STATUS (0x02)
228#define TARGET_ASSIST_FLAGS_HIGH_PRIORITY (0x04)
229#define TARGET_ASSIST_FLAGS_CONFIRMED (0x08)
230#define TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x80)
231
232
233typedef struct _MSG_TARGET_ERROR_REPLY
234{
235 U16 Reserved; /* 00h */
236 U8 MsgLength; /* 02h */
237 U8 Function; /* 03h */
238 U16 Reserved1; /* 04h */
239 U8 Reserved2; /* 06h */
240 U8 MsgFlags; /* 07h */
241 U32 MsgContext; /* 08h */
242 U8 PriorityReason; /* 0Ch */
243 U8 Reserved3; /* 0Dh */
244 U16 IOCStatus; /* 0Eh */
245 U32 IOCLogInfo; /* 10h */
246 U32 ReplyWord; /* 14h */
247 U32 TransferCount; /* 18h */
248} MSG_TARGET_ERROR_REPLY, MPI_POINTER PTR_MSG_TARGET_ERROR_REPLY,
249 TargetErrorReply_t, MPI_POINTER pTargetErrorReply_t;
250
251
252/****************************************************************************/
253/* Target Status Send Request */
254/****************************************************************************/
255
256typedef struct _MSG_TARGET_STATUS_SEND_REQUEST
257{
258 U8 StatusCode; /* 00h */
259 U8 StatusFlags; /* 01h */
260 U8 ChainOffset; /* 02h */
261 U8 Function; /* 03h */
262 U16 QueueTag; /* 04h */
263 U8 Reserved; /* 06h */
264 U8 MsgFlags; /* 07h */
265 U32 MsgContext; /* 08h */
266 U32 ReplyWord; /* 0Ch */
267 U8 LUN[8]; /* 10h */
268 SGE_SIMPLE_UNION StatusDataSGE; /* 18h */
269} MSG_TARGET_STATUS_SEND_REQUEST, MPI_POINTER PTR_MSG_TARGET_STATUS_SEND_REQUEST,
270 TargetStatusSendRequest_t, MPI_POINTER pTargetStatusSendRequest_t;
271
272#define TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS (0x01)
273#define TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY (0x04)
274#define TARGET_STATUS_SEND_FLAGS_CONFIRMED (0x08)
275#define TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER (0x80)
276
277/*
278 * NOTE: FCP_RSP data is big-endian. When used on a little-endian system, this
279 * structure properly orders the bytes.
280 */
281typedef struct _MPI_TARGET_FCP_RSP_BUFFER
282{
283 U8 Reserved0[8]; /* 00h */
284 U8 Reserved1[2]; /* 08h */
285 U8 FcpFlags; /* 0Ah */
286 U8 FcpStatus; /* 0Bh */
287 U32 FcpResid; /* 0Ch */
288 U32 FcpSenseLength; /* 10h */
289 U32 FcpResponseLength; /* 14h */
290 U8 FcpResponseData[8]; /* 18h */
291 U8 FcpSenseData[32]; /* Pad to 64 bytes */ /* 20h */
292} MPI_TARGET_FCP_RSP_BUFFER, MPI_POINTER PTR_MPI_TARGET_FCP_RSP_BUFFER,
293 MpiTargetFcpRspBuffer, MPI_POINTER pMpiTargetFcpRspBuffer;
294
295/*
296 * NOTE: The SPI status IU is big-endian. When used on a little-endian system,
297 * this structure properly orders the bytes.
298 */
299typedef struct _MPI_TARGET_SCSI_SPI_STATUS_IU
300{
301 U8 Reserved0; /* 00h */
302 U8 Reserved1; /* 01h */
303 U8 Valid; /* 02h */
304 U8 Status; /* 03h */
305 U32 SenseDataListLength; /* 04h */
306 U32 PktFailuresListLength; /* 08h */
307 U8 SenseData[52]; /* Pad the IU to 64 bytes */ /* 0Ch */
308} MPI_TARGET_SCSI_SPI_STATUS_IU, MPI_POINTER PTR_MPI_TARGET_SCSI_SPI_STATUS_IU,
309 TargetScsiSpiStatusIU_t, MPI_POINTER pTargetScsiSpiStatusIU_t;
310
311/****************************************************************************/
312/* Target Mode Abort Request */
313/****************************************************************************/
314
315typedef struct _MSG_TARGET_MODE_ABORT_REQUEST
316{
317 U8 AbortType; /* 00h */
318 U8 Reserved; /* 01h */
319 U8 ChainOffset; /* 02h */
320 U8 Function; /* 03h */
321 U16 Reserved1; /* 04h */
322 U8 Reserved2; /* 06h */
323 U8 MsgFlags; /* 07h */
324 U32 MsgContext; /* 08h */
325 U32 ReplyWord; /* 0Ch */
326 U32 MsgContextToAbort; /* 10h */
327} MSG_TARGET_MODE_ABORT, MPI_POINTER PTR_MSG_TARGET_MODE_ABORT,
328 TargetModeAbort_t, MPI_POINTER pTargetModeAbort_t;
329
330#define TARGET_MODE_ABORT_TYPE_ALL_CMD_BUFFERS (0x00)
331#define TARGET_MODE_ABORT_TYPE_ALL_IO (0x01)
332#define TARGET_MODE_ABORT_TYPE_EXACT_IO (0x02)
333#define TARGET_MODE_ABORT_TYPE_EXACT_IO_REQUEST (0x03)
334
335/* Target Mode Abort Reply */
336
337typedef struct _MSG_TARGET_MODE_ABORT_REPLY
338{
339 U16 Reserved; /* 00h */
340 U8 MsgLength; /* 02h */
341 U8 Function; /* 03h */
342 U16 Reserved1; /* 04h */
343 U8 Reserved2; /* 06h */
344 U8 MsgFlags; /* 07h */
345 U32 MsgContext; /* 08h */
346 U16 Reserved3; /* 0Ch */
347 U16 IOCStatus; /* 0Eh */
348 U32 IOCLogInfo; /* 10h */
349 U32 AbortCount; /* 14h */
350} MSG_TARGET_MODE_ABORT_REPLY, MPI_POINTER PTR_MSG_TARGET_MODE_ABORT_REPLY,
351 TargetModeAbortReply_t, MPI_POINTER pTargetModeAbortReply_t;
352
353
354/****************************************************************************/
355/* Target Mode Context Reply */
356/****************************************************************************/
357
358#define TARGET_MODE_REPLY_IO_INDEX_MASK (0x00003FFF)
359#define TARGET_MODE_REPLY_IO_INDEX_SHIFT (0)
360#define TARGET_MODE_REPLY_INITIATOR_INDEX_MASK (0x03FFC000)
361#define TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT (14)
362#define TARGET_MODE_REPLY_ALIAS_MASK (0x04000000)
363#define TARGET_MODE_REPLY_ALIAS_SHIFT (26)
364#define TARGET_MODE_REPLY_PORT_MASK (0x10000000)
365#define TARGET_MODE_REPLY_PORT_SHIFT (28)
366
367
368#define GET_IO_INDEX(x) (((x) & TARGET_MODE_REPLY_IO_INDEX_MASK) \
369 >> TARGET_MODE_REPLY_IO_INDEX_SHIFT)
370
371#define SET_IO_INDEX(t, i) \
372 ((t) = ((t) & ~TARGET_MODE_REPLY_IO_INDEX_MASK) | \
373 (((i) << TARGET_MODE_REPLY_IO_INDEX_SHIFT) & \
374 TARGET_MODE_REPLY_IO_INDEX_MASK))
375
376#define GET_INITIATOR_INDEX(x) (((x) & TARGET_MODE_REPLY_INITIATOR_INDEX_MASK) \
377 >> TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT)
378
379#define SET_INITIATOR_INDEX(t, ii) \
380 ((t) = ((t) & ~TARGET_MODE_REPLY_INITIATOR_INDEX_MASK) | \
381 (((ii) << TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT) & \
382 TARGET_MODE_REPLY_INITIATOR_INDEX_MASK))
383
384#define GET_ALIAS(x) (((x) & TARGET_MODE_REPLY_ALIAS_MASK) \
385 >> TARGET_MODE_REPLY_ALIAS_SHIFT)
386
387#define SET_ALIAS(t, a) ((t) = ((t) & ~TARGET_MODE_REPLY_ALIAS_MASK) | \
388 (((a) << TARGET_MODE_REPLY_ALIAS_SHIFT) & \
389 TARGET_MODE_REPLY_ALIAS_MASK))
390
391#define GET_PORT(x) (((x) & TARGET_MODE_REPLY_PORT_MASK) \
392 >> TARGET_MODE_REPLY_PORT_SHIFT)
393
394#define SET_PORT(t, p) ((t) = ((t) & ~TARGET_MODE_REPLY_PORT_MASK) | \
395 (((p) << TARGET_MODE_REPLY_PORT_SHIFT) & \
396 TARGET_MODE_REPLY_PORT_MASK))
397
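
A usage sketch, illustrative only: the reply word value would come from the IOC's reply FIFO, and the void casts merely silence unused-variable warnings.

static void decode_target_reply_word_example(U32 reply_word)
{
        U16 io_index        = GET_IO_INDEX(reply_word);        /* bits  0..13 */
        U16 initiator_index = GET_INITIATOR_INDEX(reply_word); /* bits 14..25 */
        U8  alias           = GET_ALIAS(reply_word);           /* bit  26     */
        U8  port            = GET_PORT(reply_word);            /* bit  28     */

        /* ... look up the posted command buffer at io_index, etc. ... */
        (void)initiator_index; (void)alias; (void)port;
}
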
398/* the following obsolete values are for MPI v1.0 support */
399#define TARGET_MODE_REPLY_0100_MASK_HOST_INDEX (0x000003FF)
400#define TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX (0)
401#define TARGET_MODE_REPLY_0100_MASK_IOC_INDEX (0x001FF800)
402#define TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX (11)
403#define TARGET_MODE_REPLY_0100_PORT_MASK (0x00400000)
404#define TARGET_MODE_REPLY_0100_PORT_SHIFT (22)
405#define TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX (0x1F800000)
406#define TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX (23)
407
408#define GET_HOST_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) \
409 >> TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX)
410
411#define SET_HOST_INDEX_0100(t, hi) \
412 ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) | \
413 (((hi) << TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX) & \
414 TARGET_MODE_REPLY_0100_MASK_HOST_INDEX))
415
416#define GET_IOC_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) \
417 >> TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX)
418
419#define SET_IOC_INDEX_0100(t, ii) \
420 ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) | \
421 (((ii) << TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX) & \
422 TARGET_MODE_REPLY_0100_MASK_IOC_INDEX))
423
424#define GET_INITIATOR_INDEX_0100(x) \
425 (((x) & TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) \
426 >> TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX)
427
428#define SET_INITIATOR_INDEX_0100(t, ii) \
429 ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) | \
430 (((ii) << TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX) & \
431 TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX))
432
433
434#endif
435
diff --git a/drivers/message/fusion/lsi/mpi_tool.h b/drivers/message/fusion/lsi/mpi_tool.h
new file mode 100644
index 000000000000..536d197c4142
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_tool.h
@@ -0,0 +1,305 @@
1/*
2 * Copyright (c) 2001-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_tool.h
6 * Title: MPI Toolbox structures and definitions
7 * Creation Date: July 30, 2001
8 *
9 * mpi_tool.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 08-08-01 01.02.01 Original release.
17 * 08-29-01 01.02.02 Added DIAG_DATA_UPLOAD_HEADER and related defines.
18 * --------------------------------------------------------------------------
19 */
20
21#ifndef MPI_TOOL_H
22#define MPI_TOOL_H
23
24#define MPI_TOOLBOX_CLEAN_TOOL (0x00)
25#define MPI_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
26#define MPI_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
27#define MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
28#define MPI_TOOLBOX_FC_MANAGEMENT_TOOL (0x04)
29
30
31/****************************************************************************/
32/* Toolbox reply */
33/****************************************************************************/
34
35typedef struct _MSG_TOOLBOX_REPLY
36{
37 U8 Tool; /* 00h */
38 U8 Reserved; /* 01h */
39 U8 MsgLength; /* 02h */
40 U8 Function; /* 03h */
41 U16 Reserved1; /* 04h */
42 U8 Reserved2; /* 06h */
43 U8 MsgFlags; /* 07h */
44 U32 MsgContext; /* 08h */
45 U16 Reserved3; /* 0Ch */
46 U16 IOCStatus; /* 0Eh */
47 U32 IOCLogInfo; /* 10h */
48} MSG_TOOLBOX_REPLY, MPI_POINTER PTR_MSG_TOOLBOX_REPLY,
49 ToolboxReply_t, MPI_POINTER pToolboxReply_t;
50
51
52/****************************************************************************/
53/* Toolbox Clean Tool request */
54/****************************************************************************/
55
56typedef struct _MSG_TOOLBOX_CLEAN_REQUEST
57{
58 U8 Tool; /* 00h */
59 U8 Reserved; /* 01h */
60 U8 ChainOffset; /* 02h */
61 U8 Function; /* 03h */
62 U16 Reserved1; /* 04h */
63 U8 Reserved2; /* 06h */
64 U8 MsgFlags; /* 07h */
65 U32 MsgContext; /* 08h */
66 U32 Flags; /* 0Ch */
67} MSG_TOOLBOX_CLEAN_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_CLEAN_REQUEST,
68 ToolboxCleanRequest_t, MPI_POINTER pToolboxCleanRequest_t;
69
70#define MPI_TOOLBOX_CLEAN_NVSRAM (0x00000001)
71#define MPI_TOOLBOX_CLEAN_SEEPROM (0x00000002)
72#define MPI_TOOLBOX_CLEAN_FLASH (0x00000004)
73#define MPI_TOOLBOX_CLEAN_BOOTLOADER (0x04000000)
74#define MPI_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
75#define MPI_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
76#define MPI_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
77#define MPI_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
78#define MPI_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
79
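
A brief sketch, illustrative only (byte ordering of multi-byte fields and the MsgContext value are left to the transport code), of building a Clean Tool request that selects the NVSRAM and SEEPROM areas:

static void fill_toolbox_clean_example(MSG_TOOLBOX_CLEAN_REQUEST *req)
{
        memset(req, 0, sizeof(*req));
        req->Tool     = MPI_TOOLBOX_CLEAN_TOOL;
        req->Function = MPI_FUNCTION_TOOLBOX;   /* toolbox function code from mpi.h */
        req->Flags    = MPI_TOOLBOX_CLEAN_NVSRAM | MPI_TOOLBOX_CLEAN_SEEPROM;
        /* MsgContext is assigned by the code that queues the request. */
}
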
80
81/****************************************************************************/
82/* Toolbox Memory Move request */
83/****************************************************************************/
84
85typedef struct _MSG_TOOLBOX_MEM_MOVE_REQUEST
86{
87 U8 Tool; /* 00h */
88 U8 Reserved; /* 01h */
89 U8 ChainOffset; /* 02h */
90 U8 Function; /* 03h */
91 U16 Reserved1; /* 04h */
92 U8 Reserved2; /* 06h */
93 U8 MsgFlags; /* 07h */
94 U32 MsgContext; /* 08h */
95 SGE_SIMPLE_UNION SGL; /* 0Ch */
96} MSG_TOOLBOX_MEM_MOVE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_MEM_MOVE_REQUEST,
97 ToolboxMemMoveRequest_t, MPI_POINTER pToolboxMemMoveRequest_t;
98
99
100/****************************************************************************/
101/* Toolbox Diagnostic Data Upload request */
102/****************************************************************************/
103
104typedef struct _MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST
105{
106 U8 Tool; /* 00h */
107 U8 Reserved; /* 01h */
108 U8 ChainOffset; /* 02h */
109 U8 Function; /* 03h */
110 U16 Reserved1; /* 04h */
111 U8 Reserved2; /* 06h */
112 U8 MsgFlags; /* 07h */
113 U32 MsgContext; /* 08h */
114 U32 Flags; /* 0Ch */
115 U32 Reserved3; /* 10h */
116 SGE_SIMPLE_UNION SGL; /* 14h */
117} MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
118 ToolboxDiagDataUploadRequest_t, MPI_POINTER pToolboxDiagDataUploadRequest_t;
119
120typedef struct _DIAG_DATA_UPLOAD_HEADER
121{
122 U32 DiagDataLength; /* 00h */
123 U8 FormatCode; /* 04h */
124 U8 Reserved; /* 05h */
125 U16 Reserved1; /* 06h */
126} DIAG_DATA_UPLOAD_HEADER, MPI_POINTER PTR_DIAG_DATA_UPLOAD_HEADER,
127 DiagDataUploadHeader_t, MPI_POINTER pDiagDataUploadHeader_t;
128
129#define MPI_TB_DIAG_FORMAT_SCSI_PRINTF_1 (0x01)
130#define MPI_TB_DIAG_FORMAT_SCSI_2 (0x02)
131#define MPI_TB_DIAG_FORMAT_SCSI_3 (0x03)
132#define MPI_TB_DIAG_FORMAT_FC_TRACE_1 (0x04)
133
134
135/****************************************************************************/
136/* Toolbox ISTWI Read Write request */
137/****************************************************************************/
138
139typedef struct _MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST
140{
141 U8 Tool; /* 00h */
142 U8 Reserved; /* 01h */
143 U8 ChainOffset; /* 02h */
144 U8 Function; /* 03h */
145 U16 Reserved1; /* 04h */
146 U8 Reserved2; /* 06h */
147 U8 MsgFlags; /* 07h */
148 U32 MsgContext; /* 08h */
149 U8 Flags; /* 0Ch */
150 U8 BusNum; /* 0Dh */
151 U16 Reserved3; /* 0Eh */
152 U8 NumAddressBytes; /* 10h */
153 U8 Reserved4; /* 11h */
154 U16 DataLength; /* 12h */
155 U8 DeviceAddr; /* 14h */
156 U8 Addr1; /* 15h */
157 U8 Addr2; /* 16h */
158 U8 Addr3; /* 17h */
159 U32 Reserved5; /* 18h */
160 SGE_SIMPLE_UNION SGL; /* 1Ch */
161} MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
162 ToolboxIstwiReadWriteRequest_t, MPI_POINTER pToolboxIstwiReadWriteRequest_t;
163
164#define MPI_TB_ISTWI_FLAGS_WRITE (0x00)
165#define MPI_TB_ISTWI_FLAGS_READ (0x01)
166
167
168/****************************************************************************/
169/* Toolbox FC Management request */
170/****************************************************************************/
171
172/* ActionInfo for Bus and TargetId */
173typedef struct _MPI_TB_FC_MANAGE_BUS_TID_AI
174{
175 U16 Reserved; /* 00h */
176 U8 Bus; /* 02h */
177 U8 TargetId; /* 03h */
178} MPI_TB_FC_MANAGE_BUS_TID_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_BUS_TID_AI,
179 MpiTbFcManageBusTidAi_t, MPI_POINTER pMpiTbFcManageBusTidAi_t;
180
181/* ActionInfo for port identifier */
182typedef struct _MPI_TB_FC_MANAGE_PID_AI
183{
184 U32 PortIdentifier; /* 00h */
185} MPI_TB_FC_MANAGE_PID_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_PID_AI,
186 MpiTbFcManagePidAi_t, MPI_POINTER pMpiTbFcManagePidAi_t;
187
188/* union of ActionInfo */
189typedef union _MPI_TB_FC_MANAGE_AI_UNION
190{
191 MPI_TB_FC_MANAGE_BUS_TID_AI BusTid;
192 MPI_TB_FC_MANAGE_PID_AI Port;
193} MPI_TB_FC_MANAGE_AI_UNION, MPI_POINTER PTR_MPI_TB_FC_MANAGE_AI_UNION,
194 MpiTbFcManageAiUnion_t, MPI_POINTER pMpiTbFcManageAiUnion_t;
195
196typedef struct _MSG_TOOLBOX_FC_MANAGE_REQUEST
197{
198 U8 Tool; /* 00h */
199 U8 Reserved; /* 01h */
200 U8 ChainOffset; /* 02h */
201 U8 Function; /* 03h */
202 U16 Reserved1; /* 04h */
203 U8 Reserved2; /* 06h */
204 U8 MsgFlags; /* 07h */
205 U32 MsgContext; /* 08h */
206 U8 Action; /* 0Ch */
207 U8 Reserved3; /* 0Dh */
208 U16 Reserved4; /* 0Eh */
209 MPI_TB_FC_MANAGE_AI_UNION ActionInfo; /* 10h */
210} MSG_TOOLBOX_FC_MANAGE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_FC_MANAGE_REQUEST,
211 ToolboxFcManageRequest_t, MPI_POINTER pToolboxFcManageRequest_t;
212
213/* defines for the Action field */
214#define MPI_TB_FC_MANAGE_ACTION_DISC_ALL (0x00)
215#define MPI_TB_FC_MANAGE_ACTION_DISC_PID (0x01)
216#define MPI_TB_FC_MANAGE_ACTION_DISC_BUS_TID (0x02)
217
218
219/****************************************************************************/
220/* Diagnostic Buffer Post request */
221/****************************************************************************/
222
223typedef struct _MSG_DIAG_BUFFER_POST_REQUEST
224{
225 U8 TraceLevel; /* 00h */
226 U8 BufferType; /* 01h */
227 U8 ChainOffset; /* 02h */
228 U8 Function; /* 03h */
229 U16 Reserved1; /* 04h */
230 U8 Reserved2; /* 06h */
231 U8 MsgFlags; /* 07h */
232 U32 MsgContext; /* 08h */
233 U32 ExtendedType; /* 0Ch */
234 U32 BufferLength; /* 10h */
235 U32 ProductSpecific[4]; /* 14h */
236 U32 Reserved3; /* 18h */
237 SGE_SIMPLE_UNION SGL; /* 28h */
238} MSG_DIAG_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_DIAG_BUFFER_POST_REQUEST,
239 DiagBufferPostRequest_t, MPI_POINTER pDiagBufferPostRequest_t;
240
241#define MPI_DIAG_BUF_TYPE_TRACE (0x00)
242#define MPI_DIAG_BUF_TYPE_SNAPSHOT (0x01)
243#define MPI_DIAG_BUF_TYPE_EXTENDED (0x02)
244
245#define MPI_DIAG_EXTENDED_QTAG (0x00000001)
246
247
248/* Diagnostic Buffer Post reply */
249typedef struct _MSG_DIAG_BUFFER_POST_REPLY
250{
251 U8 Reserved1; /* 00h */
252 U8 BufferType; /* 01h */
253 U8 MsgLength; /* 02h */
254 U8 Function; /* 03h */
255 U16 Reserved2; /* 04h */
256 U8 Reserved3; /* 06h */
257 U8 MsgFlags; /* 07h */
258 U32 MsgContext; /* 08h */
259 U16 Reserved4; /* 0Ch */
260 U16 IOCStatus; /* 0Eh */
261 U32 IOCLogInfo; /* 10h */
262 U32 TransferLength; /* 14h */
263} MSG_DIAG_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_DIAG_BUFFER_POST_REPLY,
264 DiagBufferPostReply_t, MPI_POINTER pDiagBufferPostReply_t;
265
266
267/****************************************************************************/
268/* Diagnostic Release request */
269/****************************************************************************/
270
271typedef struct _MSG_DIAG_RELEASE_REQUEST
272{
273 U8 Reserved1; /* 00h */
274 U8 BufferType; /* 01h */
275 U8 ChainOffset; /* 02h */
276 U8 Function; /* 03h */
277 U16 Reserved2; /* 04h */
278 U8 Reserved3; /* 06h */
279 U8 MsgFlags; /* 07h */
280 U32 MsgContext; /* 08h */
281} MSG_DIAG_RELEASE_REQUEST, MPI_POINTER PTR_MSG_DIAG_RELEASE_REQUEST,
282 DiagReleaseRequest_t, MPI_POINTER pDiagReleaseRequest_t;
283
284
285/* Diagnostic Release reply */
286typedef struct _MSG_DIAG_RELEASE_REPLY
287{
288 U8 Reserved1; /* 00h */
289 U8 BufferType; /* 01h */
290 U8 MsgLength; /* 02h */
291 U8 Function; /* 03h */
292 U16 Reserved2; /* 04h */
293 U8 Reserved3; /* 06h */
294 U8 MsgFlags; /* 07h */
295 U32 MsgContext; /* 08h */
296 U16 Reserved4; /* 0Ch */
297 U16 IOCStatus; /* 0Eh */
298 U32 IOCLogInfo; /* 10h */
299} MSG_DIAG_RELEASE_REPLY, MPI_POINTER PTR_MSG_DIAG_RELEASE_REPLY,
300 DiagReleaseReply_t, MPI_POINTER pDiagReleaseReply_t;
301
302
303#endif
304
305
diff --git a/drivers/message/fusion/lsi/mpi_type.h b/drivers/message/fusion/lsi/mpi_type.h
new file mode 100644
index 000000000000..239328a7689c
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_type.h
@@ -0,0 +1,86 @@
1/*
2 * Copyright (c) 2000-2003 LSI Logic Corporation.
3 *
4 *
5 * Name: mpi_type.h
6 * Title: MPI Basic type definitions
7 * Creation Date: June 6, 2000
8 *
9 * mpi_type.h Version: 01.05.xx
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
17 * 06-06-00 01.00.01 Update version number for 1.0 release.
18 * 11-02-00 01.01.01 Original release for post 1.0 work
19 * 02-20-01 01.01.02 Added define and ifdef for MPI_POINTER.
20 * 08-08-01 01.02.01 Original release for v1.2 work.
21 * --------------------------------------------------------------------------
22 */
23
24#ifndef MPI_TYPE_H
25#define MPI_TYPE_H
26
27
28/*******************************************************************************
29 * Define MPI_POINTER if it hasn't already been defined. By default MPI_POINTER
30 * is defined to be a near pointer. MPI_POINTER can be defined as a far pointer
31 * by defining MPI_POINTER as "far *" before this header file is included.
32 */
33#ifndef MPI_POINTER
34#define MPI_POINTER *
35#endif
36
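
A hedged example of the override described above; the "far" keyword is specific to segmented-memory compilers and is never used by the Linux build, which keeps the default plain pointer.

        /* Select far pointers before pulling in the MPI headers (non-Linux builds). */
        #define MPI_POINTER     far *
        #include "mpi_type.h"
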
37
38/*****************************************************************************
39*
40* B a s i c T y p e s
41*
42*****************************************************************************/
43
44typedef signed char S8;
45typedef unsigned char U8;
46typedef signed short S16;
47typedef unsigned short U16;
48
49
50typedef int32_t S32;
51typedef u_int32_t U32;
52
53/*
 54 * NOTE: the 64-bit structures below store the low 32 bits first, so they
 55 * only match the wire format on little-endian hosts.
56 */
57
58typedef struct _S64
59{
60 U32 Low;
61 S32 High;
62} S64;
63
64typedef struct _U64
65{
66 U32 Low;
67 U32 High;
68} U64;
69
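
A minimal sketch (assuming a little-endian host, as the note above implies) of splitting a 64-bit DMA address into the Low/High pair:

static U64 make_u64_example(unsigned long long dma_addr)
{
        U64 val;

        val.Low  = (U32)(dma_addr & 0xFFFFFFFFULL);     /* low 32 bits first */
        val.High = (U32)(dma_addr >> 32);               /* then the high 32 bits */
        return val;
}
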
70
71/****************************************************************************/
72/* Pointers */
73/****************************************************************************/
74
75typedef S8 *PS8;
76typedef U8 *PU8;
77typedef S16 *PS16;
78typedef U16 *PU16;
79typedef S32 *PS32;
80typedef U32 *PU32;
81typedef S64 *PS64;
82typedef U64 *PU64;
83
84
85#endif
86
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
new file mode 100644
index 000000000000..942cc1c2a186
--- /dev/null
+++ b/drivers/message/fusion/mptbase.c
@@ -0,0 +1,5946 @@
1/*
2 * linux/drivers/message/fusion/mptbase.c
3 * High performance SCSI + LAN / Fibre Channel device drivers.
4 * This is the Fusion MPT base driver which supports multiple
5 * (SCSI + LAN) specialized protocol drivers.
6 * For use with PCI chip/adapter(s):
7 * LSIFC9xx/LSI409xx Fibre Channel
8 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
9 *
10 * Credits:
11 * There are lots of people not mentioned below that deserve credit
12 * and thanks but won't get it here - sorry in advance that you
13 * got overlooked.
14 *
15 * This driver would not exist if not for Alan Cox's development
16 * of the linux i2o driver.
17 *
18 * A special thanks to Noah Romer (LSI Logic) for tons of work
19 * and tough debugging on the LAN driver, especially early on;-)
20 * And to Roger Hickerson (LSI Logic) for tirelessly supporting
21 * this driver project.
22 *
23 * A special thanks to Pamela Delaney (LSI Logic) for tons of work
24 * and countless enhancements while adding support for the 1030
  25 *  chip family.  Pam has been instrumental in the development
26 * of the 2.xx.xx series fusion drivers, and her contributions are
27 * far too numerous to hope to list in one place.
28 *
29 * All manner of help from Stephen Shirron (LSI Logic):
30 * low-level FC analysis, debug + various fixes in FCxx firmware,
31 * initial port to alpha platform, various driver code optimizations,
32 * being a faithful sounding board on all sorts of issues & ideas,
33 * etc.
34 *
35 * A huge debt of gratitude is owed to David S. Miller (DaveM)
36 * for fixing much of the stupid and broken stuff in the early
37 * driver while porting to sparc64 platform. THANK YOU!
38 *
39 * Special thanks goes to the I2O LAN driver people at the
40 * University of Helsinki, who, unbeknownst to them, provided
41 * the inspiration and initial structure for this driver.
42 *
43 * A really huge debt of gratitude is owed to Eddie C. Dost
44 * for gobs of hard work fixing and optimizing LAN code.
45 * THANK YOU!
46 *
47 * Copyright (c) 1999-2004 LSI Logic Corporation
48 * Originally By: Steven J. Ralston
49 * (mailto:sjralston1@netscape.net)
50 * (mailto:mpt_linux_developer@lsil.com)
51 *
52 * $Id: mptbase.c,v 1.126 2002/12/16 15:28:45 pdelaney Exp $
53 */
54/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
55/*
56 This program is free software; you can redistribute it and/or modify
57 it under the terms of the GNU General Public License as published by
58 the Free Software Foundation; version 2 of the License.
59
60 This program is distributed in the hope that it will be useful,
61 but WITHOUT ANY WARRANTY; without even the implied warranty of
62 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
63 GNU General Public License for more details.
64
65 NO WARRANTY
66 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
67 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
68 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
69 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
70 solely responsible for determining the appropriateness of using and
71 distributing the Program and assumes all risks associated with its
72 exercise of rights under this Agreement, including but not limited to
73 the risks and costs of program errors, damage to or loss of data,
74 programs or equipment, and unavailability or interruption of operations.
75
76 DISCLAIMER OF LIABILITY
77 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
78 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
79 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
80 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
81 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
82 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
83 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
84
85 You should have received a copy of the GNU General Public License
86 along with this program; if not, write to the Free Software
87 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
88*/
89/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
90
91#include <linux/config.h>
92#include <linux/version.h>
93#include <linux/kernel.h>
94#include <linux/module.h>
95#include <linux/errno.h>
96#include <linux/init.h>
97#include <linux/slab.h>
98#include <linux/types.h>
99#include <linux/pci.h>
100#include <linux/kdev_t.h>
101#include <linux/blkdev.h>
102#include <linux/delay.h>
103#include <linux/interrupt.h> /* needed for in_interrupt() proto */
104#include <asm/io.h>
105#ifdef CONFIG_MTRR
106#include <asm/mtrr.h>
107#endif
108#ifdef __sparc__
109#include <asm/irq.h> /* needed for __irq_itoa() proto */
110#endif
111
112#include "mptbase.h"
113
114/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
115#define my_NAME "Fusion MPT base driver"
116#define my_VERSION MPT_LINUX_VERSION_COMMON
117#define MYNAM "mptbase"
118
119MODULE_AUTHOR(MODULEAUTHOR);
120MODULE_DESCRIPTION(my_NAME);
121MODULE_LICENSE("GPL");
122
123/*
124 * cmd line parameters
125 */
126#ifdef MFCNT
127static int mfcounter = 0;
128#define PRINT_MF_COUNT 20000
129#endif
130
131/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
132/*
133 * Public data...
134 */
135int mpt_lan_index = -1;
136int mpt_stm_index = -1;
137
138struct proc_dir_entry *mpt_proc_root_dir;
139
140#define WHOINIT_UNKNOWN 0xAA
141
142/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
143/*
144 * Private data...
145 */
146 /* Adapter link list */
147LIST_HEAD(ioc_list);
148 /* Callback lookup table */
149static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
150 /* Protocol driver class lookup table */
151static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
152 /* Event handler lookup table */
153static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
154 /* Reset handler lookup table */
155static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
156static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
157
158static int mpt_base_index = -1;
159static int last_drv_idx = -1;
160
161static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq);
162
163/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
164/*
165 * Forward protos...
166 */
167static irqreturn_t mpt_interrupt(int irq, void *bus_id, struct pt_regs *r);
168static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
169static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
170 u32 *req, int replyBytes, u16 *u16reply, int maxwait,
171 int sleepFlag);
172static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
173static void mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
174static void mpt_adapter_disable(MPT_ADAPTER *ioc);
175static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
176
177static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
178static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
179//static u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
180static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
181static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
182static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
183static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
184static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
185static int mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag);
186static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
187static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
188static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
189static int PrimeIocFifos(MPT_ADAPTER *ioc);
190static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
191static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
192static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
193static int GetLanConfigPages(MPT_ADAPTER *ioc);
194static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum);
195static int GetIoUnitPage2(MPT_ADAPTER *ioc);
196static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
197static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
198static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
199static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
200static void mpt_timer_expired(unsigned long data);
201static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
202static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
203
204#ifdef CONFIG_PROC_FS
205static int procmpt_summary_read(char *buf, char **start, off_t offset,
206 int request, int *eof, void *data);
207static int procmpt_version_read(char *buf, char **start, off_t offset,
208 int request, int *eof, void *data);
209static int procmpt_iocinfo_read(char *buf, char **start, off_t offset,
210 int request, int *eof, void *data);
211#endif
212static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
213
214//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
215static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers);
216static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
217static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
218static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info);
219
220/* module entry point */
221static int __devinit mptbase_probe (struct pci_dev *, const struct pci_device_id *);
222static void __devexit mptbase_remove(struct pci_dev *);
223static void mptbase_shutdown(struct device * );
224static int __init fusion_init (void);
225static void __exit fusion_exit (void);
226
227/****************************************************************************
228 * Supported hardware
229 */
230
231static struct pci_device_id mptbase_pci_table[] = {
232 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC909,
233 PCI_ANY_ID, PCI_ANY_ID },
234 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC929,
235 PCI_ANY_ID, PCI_ANY_ID },
236 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC919,
237 PCI_ANY_ID, PCI_ANY_ID },
238 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC929X,
239 PCI_ANY_ID, PCI_ANY_ID },
240 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC919X,
241 PCI_ANY_ID, PCI_ANY_ID },
242 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1030,
243 PCI_ANY_ID, PCI_ANY_ID },
244 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_1030_53C1035,
245 PCI_ANY_ID, PCI_ANY_ID },
246 {0} /* Terminating entry */
247};
248MODULE_DEVICE_TABLE(pci, mptbase_pci_table);
249
250#define CHIPREG_READ32(addr) readl_relaxed(addr)
251#define CHIPREG_READ32_dmasync(addr) readl(addr)
252#define CHIPREG_WRITE32(addr,val) writel(val, addr)
253#define CHIPREG_PIO_WRITE32(addr,val) outl(val, (unsigned long)addr)
254#define CHIPREG_PIO_READ32(addr) inl((unsigned long)addr)
255
256/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
257/*
258 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
259 * @irq: irq number (not used)
260 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
261 * @r: pt_regs pointer (not used)
262 *
263 * This routine is registered via the request_irq() kernel API call,
264 * and handles all interrupts generated from a specific MPT adapter
 265 *  (also referred to as an IO Controller or IOC).
266 * This routine must clear the interrupt from the adapter and does
267 * so by reading the reply FIFO. Multiple replies may be processed
268 * per single call to this routine; up to MPT_MAX_REPLIES_PER_ISR
269 * which is currently set to 32 in mptbase.h.
270 *
271 * This routine handles register-level access of the adapter but
272 * dispatches (calls) a protocol-specific callback routine to handle
273 * the protocol-specific details of the MPT request completion.
274 */
275static irqreturn_t
276mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
277{
278 MPT_ADAPTER *ioc;
279 MPT_FRAME_HDR *mf;
280 MPT_FRAME_HDR *mr;
281 u32 pa;
282 int req_idx;
283 int cb_idx;
284 int type;
285 int freeme;
286
287 ioc = (MPT_ADAPTER *)bus_id;
288
289 /*
290 * Drain the reply FIFO!
291 *
292 * NOTES: I've seen up to 10 replies processed in this loop, so far...
293 * Update: I've seen up to 9182 replies processed in this loop! ??
294 * Update: Limit ourselves to processing max of N replies
295 * (bottom of loop).
296 */
297 while (1) {
298
299 if ((pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo)) == 0xFFFFFFFF)
300 return IRQ_HANDLED;
301
302 cb_idx = 0;
303 freeme = 0;
304
305 /*
306 * Check for non-TURBO reply!
307 */
308 if (pa & MPI_ADDRESS_REPLY_A_BIT) {
309 u32 reply_dma_low;
310 u16 ioc_stat;
311
312 /* non-TURBO reply! Hmmm, something may be up...
313 * Newest turbo reply mechanism; get address
314 * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
315 */
316
317 /* Map DMA address of reply header to cpu address.
318 * pa is 32 bits - but the dma address may be 32 or 64 bits
 319 *  get offset based only on the low addresses
320 */
321 reply_dma_low = (pa = (pa << 1));
322 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
323 (reply_dma_low - ioc->reply_frames_low_dma));
324
325 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
326 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
327 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
328
329 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x\n",
330 ioc->name, mr, req_idx));
331 DBG_DUMP_REPLY_FRAME(mr)
332
333 /* NEW! 20010301 -sralston
334 * Check/log IOC log info
335 */
336 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
337 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
338 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
339 if (ioc->bus_type == FC)
340 mpt_fc_log_info(ioc, log_info);
341 else if (ioc->bus_type == SCSI)
342 mpt_sp_log_info(ioc, log_info);
343 }
344 if (ioc_stat & MPI_IOCSTATUS_MASK) {
345 if (ioc->bus_type == SCSI)
346 mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
347 }
348 } else {
349 /*
350 * Process turbo (context) reply...
351 */
352 dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n", ioc->name, pa));
353 type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT);
354 if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) {
355 cb_idx = mpt_stm_index;
356 mf = NULL;
357 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
358 } else if (type == MPI_CONTEXT_REPLY_TYPE_LAN) {
359 cb_idx = mpt_lan_index;
360 /*
361 * BUG FIX! 20001218 -sralston
362 * Blind set of mf to NULL here was fatal
363 * after lan_reply says "freeme"
364 * Fix sort of combined with an optimization here;
365 * added explicit check for case where lan_reply
366 * was just returning 1 and doing nothing else.
367 * For this case skip the callback, but set up
368 * proper mf value first here:-)
369 */
370 if ((pa & 0x58000000) == 0x58000000) {
371 req_idx = pa & 0x0000FFFF;
372 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
373 freeme = 1;
374 /*
375 * IMPORTANT! Invalidate the callback!
376 */
377 cb_idx = 0;
378 } else {
379 mf = NULL;
380 }
381 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
382 } else {
383 req_idx = pa & 0x0000FFFF;
384 cb_idx = (pa & 0x00FF0000) >> 16;
385 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
386 mr = NULL;
387 }
388 pa = 0; /* No reply flush! */
389 }
390
391#ifdef MPT_DEBUG_IRQ
392 if (ioc->bus_type == SCSI) {
393 /* Verify mf, mr are reasonable.
394 */
395 if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
396 || (mf < ioc->req_frames)) ) {
397 printk(MYIOC_s_WARN_FMT
398 "mpt_interrupt: Invalid mf (%p) req_idx (%d)!\n", ioc->name, (void *)mf, req_idx);
399 cb_idx = 0;
400 pa = 0;
401 freeme = 0;
402 }
403 if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth))
404 || (mr < ioc->reply_frames)) ) {
405 printk(MYIOC_s_WARN_FMT
406 "mpt_interrupt: Invalid rf (%p)!\n", ioc->name, (void *)mr);
407 cb_idx = 0;
408 pa = 0;
409 freeme = 0;
410 }
411 if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) {
412 printk(MYIOC_s_WARN_FMT
413 "mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx);
414 cb_idx = 0;
415 pa = 0;
416 freeme = 0;
417 }
418 }
419#endif
420
421 /* Check for (valid) IO callback! */
422 if (cb_idx) {
423 /* Do the callback! */
424 freeme = (*(MptCallbacks[cb_idx]))(ioc, mf, mr);
425 }
426
427 if (pa) {
428 /* Flush (non-TURBO) reply with a WRITE! */
429 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
430 }
431
432 if (freeme) {
433 unsigned long flags;
434
435 /* Put Request back on FreeQ! */
436 spin_lock_irqsave(&ioc->FreeQlock, flags);
437 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
438#ifdef MFCNT
439 ioc->mfcnt--;
440#endif
441 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
442 }
443
444 mb();
445 } /* drain reply FIFO */
446
447 return IRQ_HANDLED;
448}
449
450/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
451/*
452 * mpt_base_reply - MPT base driver's callback routine; all base driver
453 * "internal" request/reply processing is routed here.
454 * Currently used for EventNotification and EventAck handling.
455 * @ioc: Pointer to MPT_ADAPTER structure
456 * @mf: Pointer to original MPT request frame
457 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
458 *
459 * Returns 1 indicating original alloc'd request frame ptr
460 * should be freed, or 0 if it shouldn't.
461 */
462static int
463mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
464{
465 int freereq = 1;
466 u8 func;
467
468 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name));
469
470 if ((mf == NULL) ||
471 (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
472 printk(MYIOC_s_ERR_FMT "NULL or BAD request frame ptr! (=%p)\n",
473 ioc->name, (void *)mf);
474 return 1;
475 }
476
477 if (reply == NULL) {
478 dprintk((MYIOC_s_ERR_FMT "Unexpected NULL Event (turbo?) reply!\n",
479 ioc->name));
480 return 1;
481 }
482
483 if (!(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) {
484 dmfprintk((KERN_INFO MYNAM ": Original request frame (@%p) header\n", mf));
485 DBG_DUMP_REQUEST_FRAME_HDR(mf)
486 }
487
488 func = reply->u.hdr.Function;
489 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n",
490 ioc->name, func));
491
492 if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
493 EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply;
494 int evHandlers = 0;
495 int results;
496
497 results = ProcessEventNotification(ioc, pEvReply, &evHandlers);
498 if (results != evHandlers) {
499 /* CHECKME! Any special handling needed here? */
500 devtprintk((MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n",
501 ioc->name, evHandlers, results));
502 }
503
504 /*
505 * Hmmm... It seems that EventNotificationReply is an exception
506 * to the rule of one reply per request.
507 */
508 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
509 freereq = 0;
510
511#ifdef CONFIG_PROC_FS
512// LogEvent(ioc, pEvReply);
513#endif
514
515 } else if (func == MPI_FUNCTION_EVENT_ACK) {
516 dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n",
517 ioc->name));
518 } else if (func == MPI_FUNCTION_CONFIG ||
519 func == MPI_FUNCTION_TOOLBOX) {
520 CONFIGPARMS *pCfg;
521 unsigned long flags;
522
523 dcprintk((MYIOC_s_INFO_FMT "config_complete (mf=%p,mr=%p)\n",
524 ioc->name, mf, reply));
525
526 pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *)));
527
528 if (pCfg) {
529 /* disable timer and remove from linked list */
530 del_timer(&pCfg->timer);
531
532 spin_lock_irqsave(&ioc->FreeQlock, flags);
533 list_del(&pCfg->linkage);
534 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
535
536 /*
537 * If IOC Status is SUCCESS, save the header
538 * and set the status code to GOOD.
539 */
540 pCfg->status = MPT_CONFIG_ERROR;
541 if (reply) {
542 ConfigReply_t *pReply = (ConfigReply_t *)reply;
543 u16 status;
544
545 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
546 dcprintk((KERN_NOTICE " IOCStatus=%04xh, IOCLogInfo=%08xh\n",
547 status, le32_to_cpu(pReply->IOCLogInfo)));
548
549 pCfg->status = status;
550 if (status == MPI_IOCSTATUS_SUCCESS) {
551 pCfg->hdr->PageVersion = pReply->Header.PageVersion;
552 pCfg->hdr->PageLength = pReply->Header.PageLength;
553 pCfg->hdr->PageNumber = pReply->Header.PageNumber;
554 pCfg->hdr->PageType = pReply->Header.PageType;
555 }
556 }
557
558 /*
559 * Wake up the original calling thread
560 */
561 pCfg->wait_done = 1;
562 wake_up(&mpt_waitq);
563 }
564 } else {
565 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
566 ioc->name, func);
567 }
568
569 /*
570 * Conditionally tell caller to free the original
571 * EventNotification/EventAck/unexpected request frame!
572 */
573 return freereq;
574}
575
576/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
577/**
578 * mpt_register - Register protocol-specific main callback handler.
579 * @cbfunc: callback function pointer
580 * @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
581 *
582 * This routine is called by a protocol-specific driver (SCSI host,
 583 *  LAN, SCSI target) to register its reply callback routine.  Each
584 * protocol-specific driver must do this before it will be able to
585 * use any IOC resources, such as obtaining request frames.
586 *
587 * NOTES: The SCSI protocol driver currently calls this routine thrice
588 * in order to register separate callbacks; one for "normal" SCSI IO;
589 * one for MptScsiTaskMgmt requests; one for Scan/DV requests.
590 *
591 * Returns a positive integer valued "handle" in the
592 * range (and S.O.D. order) {N,...,7,6,5,...,1} if successful.
593 * Any non-positive return value (including zero!) should be considered
594 * an error by the caller.
595 */
596int
597mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass)
598{
599 int i;
600
601 last_drv_idx = -1;
602
603 /*
604 * Search for empty callback slot in this order: {N,...,7,6,5,...,1}
605 * (slot/handle 0 is reserved!)
606 */
607 for (i = MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) {
608 if (MptCallbacks[i] == NULL) {
609 MptCallbacks[i] = cbfunc;
610 MptDriverClass[i] = dclass;
611 MptEvHandlers[i] = NULL;
612 last_drv_idx = i;
613 break;
614 }
615 }
616
617 return last_drv_idx;
618}
619
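
A hedged usage sketch: the driver class passed here and the empty handler body are placeholders, and a real protocol driver typically also registers event and reset handlers via the routines below.

static int my_cb_idx = -1;

static int my_reply_handler(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
        /* protocol-specific completion handling would go here */
        return 1;       /* non-zero tells the base driver to free the request frame */
}

static int my_protocol_attach(void)
{
        my_cb_idx = mpt_register(my_reply_handler, MPTUNKNOWN_DRIVER /* placeholder class */);
        if (my_cb_idx <= 0)
                return -EBUSY;  /* zero or negative handle: no free callback slot */
        return 0;
}
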
620/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
621/**
 622 *      mpt_deregister - Deregister a protocol driver's resources.
623 * @cb_idx: previously registered callback handle
624 *
 625 *      Each protocol-specific driver should call this routine when its
626 * module is unloaded.
627 */
628void
629mpt_deregister(int cb_idx)
630{
631 if ((cb_idx >= 0) && (cb_idx < MPT_MAX_PROTOCOL_DRIVERS)) {
632 MptCallbacks[cb_idx] = NULL;
633 MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
634 MptEvHandlers[cb_idx] = NULL;
635
636 last_drv_idx++;
637 }
638}
639
640/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
641/**
642 * mpt_event_register - Register protocol-specific event callback
643 * handler.
644 * @cb_idx: previously registered (via mpt_register) callback handle
645 * @ev_cbfunc: callback function
646 *
647 * This routine can be called by one or more protocol-specific drivers
648 * if/when they choose to be notified of MPT events.
649 *
650 * Returns 0 for success.
651 */
652int
653mpt_event_register(int cb_idx, MPT_EVHANDLER ev_cbfunc)
654{
655 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
656 return -1;
657
658 MptEvHandlers[cb_idx] = ev_cbfunc;
659 return 0;
660}
661
662/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
663/**
664 * mpt_event_deregister - Deregister protocol-specific event callback
665 * handler.
666 * @cb_idx: previously registered callback handle
667 *
668 * Each protocol-specific driver should call this routine
669 * when it does not (or can no longer) handle events,
 670 *      or when its module is unloaded.
671 */
672void
673mpt_event_deregister(int cb_idx)
674{
675 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
676 return;
677
678 MptEvHandlers[cb_idx] = NULL;
679}
680
681/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
682/**
683 * mpt_reset_register - Register protocol-specific IOC reset handler.
684 * @cb_idx: previously registered (via mpt_register) callback handle
685 * @reset_func: reset function
686 *
687 * This routine can be called by one or more protocol-specific drivers
688 * if/when they choose to be notified of IOC resets.
689 *
690 * Returns 0 for success.
691 */
692int
693mpt_reset_register(int cb_idx, MPT_RESETHANDLER reset_func)
694{
695 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
696 return -1;
697
698 MptResetHandlers[cb_idx] = reset_func;
699 return 0;
700}
701
702/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
703/**
704 * mpt_reset_deregister - Deregister protocol-specific IOC reset handler.
705 * @cb_idx: previously registered callback handle
706 *
707 * Each protocol-specific driver should call this routine
708 * when it does not (or can no longer) handle IOC reset handling,
 709 *      or when its module is unloaded.
710 */
711void
712mpt_reset_deregister(int cb_idx)
713{
714 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
715 return;
716
717 MptResetHandlers[cb_idx] = NULL;
718}
719
720/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
721/**
722 * mpt_device_driver_register - Register device driver hooks
723 */
724int
725mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
726{
727 MPT_ADAPTER *ioc;
728 int error=0;
729
730 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) {
731 error= -EINVAL;
732 return error;
733 }
734
735 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
736
737 /* call per pci device probe entry point */
738 list_for_each_entry(ioc, &ioc_list, list) {
739 if(dd_cbfunc->probe) {
740 error = dd_cbfunc->probe(ioc->pcidev,
741 ioc->pcidev->driver->id_table);
742 if(error != 0)
743 return error;
744 }
745 }
746
747 return error;
748}
749
750/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
751/**
752 * mpt_device_driver_deregister - DeRegister device driver hooks
753 */
754void
755mpt_device_driver_deregister(int cb_idx)
756{
757 struct mpt_pci_driver *dd_cbfunc;
758 MPT_ADAPTER *ioc;
759
760 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
761 return;
762
763 dd_cbfunc = MptDeviceDriverHandlers[cb_idx];
764
765 list_for_each_entry(ioc, &ioc_list, list) {
766 if (dd_cbfunc->remove)
767 dd_cbfunc->remove(ioc->pcidev);
768 }
769
770 MptDeviceDriverHandlers[cb_idx] = NULL;
771}
772
773
774/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
775/**
776 * mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024)
777 * allocated per MPT adapter.
778 * @handle: Handle of registered MPT protocol driver
779 * @ioc: Pointer to MPT adapter structure
780 *
781 * Returns pointer to a MPT request frame or %NULL if none are available
782 * or IOC is not active.
783 */
784MPT_FRAME_HDR*
785mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc)
786{
787 MPT_FRAME_HDR *mf;
788 unsigned long flags;
789 u16 req_idx; /* Request index */
790
791 /* validate handle and ioc identifier */
792
793#ifdef MFCNT
794 if (!ioc->active)
795 printk(KERN_WARNING "IOC Not Active! mpt_get_msg_frame returning NULL!\n");
796#endif
797
798 /* If interrupts are not attached, do not return a request frame */
799 if (!ioc->active)
800 return NULL;
801
802 spin_lock_irqsave(&ioc->FreeQlock, flags);
803 if (!list_empty(&ioc->FreeQ)) {
804 int req_offset;
805
806 mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
807 u.frame.linkage.list);
808 list_del(&mf->u.frame.linkage.list);
809 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */
810 req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
811 /* u16! */
812 req_idx = cpu_to_le16(req_offset / ioc->req_sz);
813 mf->u.frame.hwhdr.msgctxu.fld.req_idx = req_idx;
814 mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
815 ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame; /* Default, will be changed if necessary in SG generation */
816#ifdef MFCNT
817 ioc->mfcnt++;
818#endif
819 }
820 else
821 mf = NULL;
822 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
823
824#ifdef MFCNT
825 if (mf == NULL)
826 printk(KERN_WARNING "IOC Active. No free Msg Frames! Count 0x%x Max 0x%x\n", ioc->mfcnt, ioc->req_depth);
827 mfcounter++;
828 if (mfcounter == PRINT_MF_COUNT)
829 printk(KERN_INFO "MF Count 0x%x Max 0x%x \n", ioc->mfcnt, ioc->req_depth);
830#endif
831
832 dmfprintk((KERN_INFO MYNAM ": %s: mpt_get_msg_frame(%d,%d), got mf=%p\n",
833 ioc->name, handle, ioc->id, mf));
834 return mf;
835}
836
837/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
838/**
839 * mpt_put_msg_frame - Send a protocol specific MPT request frame
 840 *      to an IOC.
841 * @handle: Handle of registered MPT protocol driver
842 * @ioc: Pointer to MPT adapter structure
843 * @mf: Pointer to MPT request frame
844 *
845 * This routine posts a MPT request frame to the request post FIFO of a
846 * specific MPT adapter.
847 */
848void
849mpt_put_msg_frame(int handle, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
850{
851 u32 mf_dma_addr;
852 int req_offset;
853 u16 req_idx; /* Request index */
854
855 /* ensure values are reset properly! */
856 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */
857 req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
858 /* u16! */
859 req_idx = cpu_to_le16(req_offset / ioc->req_sz);
860 mf->u.frame.hwhdr.msgctxu.fld.req_idx = req_idx;
861 mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
862
863#ifdef MPT_DEBUG_MSG_FRAME
864 {
865 u32 *m = mf->u.frame.hwhdr.__hdr;
866 int ii, n;
867
868 printk(KERN_INFO MYNAM ": %s: About to Put msg frame @ %p:\n" KERN_INFO " ",
869 ioc->name, m);
870 n = ioc->req_sz/4 - 1;
871 while (m[n] == 0)
872 n--;
873 for (ii=0; ii<=n; ii++) {
874 if (ii && ((ii%8)==0))
875 printk("\n" KERN_INFO " ");
876 printk(" %08x", le32_to_cpu(m[ii]));
877 }
878 printk("\n");
879 }
880#endif
881
882 mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
883 dsgprintk((MYIOC_s_INFO_FMT "mf_dma_addr=%x req_idx=%d RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx, ioc->RequestNB[req_idx]));
884 CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
885}
886
887/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
888/**
889 * mpt_free_msg_frame - Place MPT request frame back on FreeQ.
891 * @ioc: Pointer to MPT adapter structure
892 * @mf: Pointer to MPT request frame
893 *
894 * This routine places a MPT request frame back on the MPT adapter's
895 * FreeQ.
896 */
897void
898mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
899{
900 unsigned long flags;
901
902 /* Put Request back on FreeQ! */
903 spin_lock_irqsave(&ioc->FreeQlock, flags);
904 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
905#ifdef MFCNT
906 ioc->mfcnt--;
907#endif
908 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
909}
910
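
Taken together, the three routines above are normally used in a get/fill/post sequence; the sketch below is illustrative only (my_cb_idx is the handle returned by mpt_register, and the request body is left empty).

static int send_example_request(MPT_ADAPTER *ioc, int my_cb_idx)
{
        MPT_FRAME_HDR *mf;

        mf = mpt_get_msg_frame(my_cb_idx, ioc);
        if (mf == NULL)
                return -EAGAIN;                 /* pool exhausted or IOC not active */

        /* ... fill in the protocol-specific request fields here ... */

        mpt_put_msg_frame(my_cb_idx, ioc, mf);  /* post to the request FIFO */
        return 0;
}

If filling the frame fails after a successful get, mpt_free_msg_frame(ioc, mf) returns the frame to the FreeQ instead of posting it.
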
911/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
912/**
913 * mpt_add_sge - Place a simple SGE at address pAddr.
914 * @pAddr: virtual address for SGE
915 * @flagslength: SGE flags and data transfer length
916 * @dma_addr: Physical address
917 *
 918 *      This routine writes a simple 32-bit or 64-bit scatter gather element
 919 *      (SGE) at the virtual address given by @pAddr.
920 */
921void
922mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
923{
924 if (sizeof(dma_addr_t) == sizeof(u64)) {
925 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
926 u32 tmp = dma_addr & 0xFFFFFFFF;
927
928 pSge->FlagsLength = cpu_to_le32(flagslength);
929 pSge->Address.Low = cpu_to_le32(tmp);
930 tmp = (u32) ((u64)dma_addr >> 32);
931 pSge->Address.High = cpu_to_le32(tmp);
932
933 } else {
934 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
935 pSge->FlagsLength = cpu_to_le32(flagslength);
936 pSge->Address = cpu_to_le32(dma_addr);
937 }
938}
939
940/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
941/**
942 * mpt_send_handshake_request - Send MPT request via doorbell
943 * handshake method.
944 * @handle: Handle of registered MPT protocol driver
945 * @ioc: Pointer to MPT adapter structure
946 * @reqBytes: Size of the request in bytes
947 * @req: Pointer to MPT request frame
948 * @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
949 *
950 * This routine is used exclusively to send MptScsiTaskMgmt
951 * requests since they are required to be sent via doorbell handshake.
952 *
 953 *      NOTE: It is the caller's responsibility to byte-swap fields in the
954 * request which are greater than 1 byte in size.
955 *
956 * Returns 0 for success, non-zero for failure.
957 */
958int
959mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
960{
961 int r = 0;
962 u8 *req_as_bytes;
963 int ii;
964
965 /* State is known to be good upon entering
966 * this function so issue the bus reset
967 * request.
968 */
969
970 /*
 971	 * Emulate what mpt_put_msg_frame() does w/r/t sanity
 972	 * setting of cb_idx/req_idx.  But ONLY if this request
973 * is in proper (pre-alloc'd) request buffer range...
974 */
975 ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
976 if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
977 MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
978 mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
979 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle;
980 }
981
982 /* Make sure there are no doorbells */
983 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
984
985 CHIPREG_WRITE32(&ioc->chip->Doorbell,
986 ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
987 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
988
989 /* Wait for IOC doorbell int */
990 if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
991 return ii;
992 }
993
994 /* Read doorbell and check for active bit */
995 if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
996 return -5;
997
998 dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n",
999 ioc->name, ii));
1000
1001 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1002
1003 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1004 return -2;
1005 }
1006
1007 /* Send request via doorbell handshake */
1008 req_as_bytes = (u8 *) req;
1009 for (ii = 0; ii < reqBytes/4; ii++) {
1010 u32 word;
1011
1012 word = ((req_as_bytes[(ii*4) + 0] << 0) |
1013 (req_as_bytes[(ii*4) + 1] << 8) |
1014 (req_as_bytes[(ii*4) + 2] << 16) |
1015 (req_as_bytes[(ii*4) + 3] << 24));
1016 CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
1017 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1018 r = -3;
1019 break;
1020 }
1021 }
1022
1023 if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
1024 r = 0;
1025 else
1026 r = -4;
1027
1028 /* Make sure there are no doorbells */
1029 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1030
1031 return r;
1032}
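/*
 * Usage sketch (illustrative): this is how a SCSI task management request
 * would be pushed through the doorbell handshake.  SCSITaskMgmt_t and the
 * MPI_FUNCTION_/MPI_SCSITASKMGMT_ constants come from the lsi/ headers;
 * my_cb_idx and target are placeholders:
 *
 *	SCSITaskMgmt_t *pTM = (SCSITaskMgmt_t *) mf;
 *	memset(pTM, 0, sizeof(SCSITaskMgmt_t));
 *	pTM->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
 *	pTM->TaskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 *	pTM->TargetID = target;
 *	... remaining fields per lsi/mpi_init.h ...
 *	ret = mpt_send_handshake_request(my_cb_idx, ioc,
 *			sizeof(SCSITaskMgmt_t), (u32 *) pTM, CAN_SLEEP);
 *
 * Per the NOTE above, any field wider than one byte must be byte-swapped
 * by the caller before this call.
 */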
1033
1034/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1035/**
1036 * mpt_verify_adapter - Given a unique IOC identifier, set pointer to
1037 * the associated MPT adapter structure.
1038 * @iocid: IOC unique identifier (integer)
1039 * @iocpp: Pointer to pointer to IOC adapter
1040 *
1041 * Returns iocid and sets *iocpp on success, else returns -1 and sets *iocpp to NULL.
1042 */
1043int
1044mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
1045{
1046 MPT_ADAPTER *ioc;
1047
1048 list_for_each_entry(ioc,&ioc_list,list) {
1049 if (ioc->id == iocid) {
1050 *iocpp =ioc;
1051 return iocid;
1052 }
1053 }
1054
1055 *iocpp = NULL;
1056 return -1;
1057}
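/*
 * Usage sketch (illustrative): translating an ioc id (for example one
 * passed in by a management ioctl) back into an adapter pointer
 * (iocnum is a placeholder for the caller-supplied id):
 *
 *	MPT_ADAPTER *iocp = NULL;
 *
 *	if (mpt_verify_adapter(iocnum, &iocp) < 0 || iocp == NULL)
 *		return -ENODEV;		(no such adapter)
 *	... iocp may be used from here on ...
 */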
1058
1059/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1060/*
1061 * mptbase_probe - Install a PCI intelligent MPT adapter.
1062 * @pdev: Pointer to pci_dev structure
1063 *
1064 * This routine performs all the steps necessary to bring the IOC of
1065 * a MPT adapter to an OPERATIONAL state. This includes registering
1066 * memory regions, registering the interrupt, and allocating request
1067 * and reply memory pools.
1068 *
1069 * This routine also pre-fetches the LAN MAC address of a Fibre Channel
1070 * MPT adapter.
1071 *
1072 * Returns 0 for success, non-zero for failure.
1073 *
1074 * TODO: Add support for polled controllers
1075 */
1076static int __devinit
1077mptbase_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1078{
1079 MPT_ADAPTER *ioc;
1080 u8 __iomem *mem;
1081 unsigned long mem_phys;
1082 unsigned long port;
1083 u32 msize;
1084 u32 psize;
1085 int ii;
1086 int r = -ENODEV;
1087 u64 mask = 0xffffffffffffffffULL;
1088 u8 revision;
1089 u8 pcixcmd;
1090 static int mpt_ids = 0;
1091#ifdef CONFIG_PROC_FS
1092 struct proc_dir_entry *dent, *ent;
1093#endif
1094
1095 if (pci_enable_device(pdev))
1096 return r;
1097
1098	dinitprintk((KERN_WARNING MYNAM ": mptbase_probe\n"));
1099
1100 if (!pci_set_dma_mask(pdev, mask)) {
1101 dprintk((KERN_INFO MYNAM
1102 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n"));
1103 } else if (pci_set_dma_mask(pdev, (u64) 0xffffffff)) {
1104 printk(KERN_WARNING MYNAM ": 32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n");
1105 return r;
1106 }
1107
1108 if (!pci_set_consistent_dma_mask(pdev, mask))
1109 dprintk((KERN_INFO MYNAM
1110 ": Using 64 bit consistent mask\n"));
1111 else
1112 dprintk((KERN_INFO MYNAM
1113 ": Not using 64 bit consistent mask\n"));
1114
1115 ioc = kmalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
1116 if (ioc == NULL) {
1117 printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
1118 return -ENOMEM;
1119 }
1120 memset(ioc, 0, sizeof(MPT_ADAPTER));
1121 ioc->alloc_total = sizeof(MPT_ADAPTER);
1122 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
1123 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
1124
1125 ioc->pcidev = pdev;
1126 ioc->diagPending = 0;
1127 spin_lock_init(&ioc->diagLock);
1128
1129 /* Initialize the event logging.
1130 */
1131 ioc->eventTypes = 0; /* None */
1132 ioc->eventContext = 0;
1133 ioc->eventLogSize = 0;
1134 ioc->events = NULL;
1135
1136#ifdef MFCNT
1137 ioc->mfcnt = 0;
1138#endif
1139
1140 ioc->cached_fw = NULL;
1141
1142	/* Initialize SCSI Config Data structure
1143 */
1144 memset(&ioc->spi_data, 0, sizeof(ScsiCfgData));
1145
1146 /* Initialize the running configQ head.
1147 */
1148 INIT_LIST_HEAD(&ioc->configQ);
1149
1150 /* Find lookup slot. */
1151 INIT_LIST_HEAD(&ioc->list);
1152 ioc->id = mpt_ids++;
1153
1154 mem_phys = msize = 0;
1155 port = psize = 0;
1156 for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) {
1157 if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
1158 /* Get I/O space! */
1159 port = pci_resource_start(pdev, ii);
1160 psize = pci_resource_len(pdev,ii);
1161 } else {
1162 /* Get memmap */
1163 mem_phys = pci_resource_start(pdev, ii);
1164 msize = pci_resource_len(pdev,ii);
1165 break;
1166 }
1167 }
1168 ioc->mem_size = msize;
1169
1170 if (ii == DEVICE_COUNT_RESOURCE) {
1171 printk(KERN_ERR MYNAM ": ERROR - MPT adapter has no memory regions defined!\n");
1172 kfree(ioc);
1173 return -EINVAL;
1174 }
1175
1176	dinitprintk((KERN_INFO MYNAM ": MPT adapter @ %lx, msize=%d bytes\n", mem_phys, msize));
1177	dinitprintk((KERN_INFO MYNAM ": (port i/o @ %lx, psize=%d bytes)\n", port, psize));
1178
1179 mem = NULL;
1180 /* Get logical ptr for PciMem0 space */
1181 /*mem = ioremap(mem_phys, msize);*/
1182 mem = ioremap(mem_phys, 0x100);
1183 if (mem == NULL) {
1184 printk(KERN_ERR MYNAM ": ERROR - Unable to map adapter memory!\n");
1185 kfree(ioc);
1186 return -EINVAL;
1187 }
1188 ioc->memmap = mem;
1189 dinitprintk((KERN_INFO MYNAM ": mem = %p, mem_phys = %lx\n", mem, mem_phys));
1190
1191 dinitprintk((KERN_INFO MYNAM ": facts @ %p, pfacts[0] @ %p\n",
1192 &ioc->facts, &ioc->pfacts[0]));
1193
1194 ioc->mem_phys = mem_phys;
1195 ioc->chip = (SYSIF_REGS __iomem *)mem;
1196
1197 /* Save Port IO values in case we need to do downloadboot */
1198 {
1199 u8 *pmem = (u8*)port;
1200 ioc->pio_mem_phys = port;
1201 ioc->pio_chip = (SYSIF_REGS __iomem *)pmem;
1202 }
1203
1204 if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC909) {
1205 ioc->prod_name = "LSIFC909";
1206 ioc->bus_type = FC;
1207 }
1208 if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
1209 ioc->prod_name = "LSIFC929";
1210 ioc->bus_type = FC;
1211 }
1212 else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919) {
1213 ioc->prod_name = "LSIFC919";
1214 ioc->bus_type = FC;
1215 }
1216 else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929X) {
1217 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1218 ioc->bus_type = FC;
1219 if (revision < XL_929) {
1220 ioc->prod_name = "LSIFC929X";
1221 /* 929X Chip Fix. Set Split transactions level
1222 * for PCIX. Set MOST bits to zero.
1223 */
1224 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1225 pcixcmd &= 0x8F;
1226 pci_write_config_byte(pdev, 0x6a, pcixcmd);
1227 } else {
1228 ioc->prod_name = "LSIFC929XL";
1229 /* 929XL Chip Fix. Set MMRBC to 0x08.
1230 */
1231 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1232 pcixcmd |= 0x08;
1233 pci_write_config_byte(pdev, 0x6a, pcixcmd);
1234 }
1235 }
1236 else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919X) {
1237 ioc->prod_name = "LSIFC919X";
1238 ioc->bus_type = FC;
1239 /* 919X Chip Fix. Set Split transactions level
1240 * for PCIX. Set MOST bits to zero.
1241 */
1242 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1243 pcixcmd &= 0x8F;
1244 pci_write_config_byte(pdev, 0x6a, pcixcmd);
1245 }
1246 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) {
1247 ioc->prod_name = "LSI53C1030";
1248 ioc->bus_type = SCSI;
1249 /* 1030 Chip Fix. Disable Split transactions
1250 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
1251 */
1252 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1253 if (revision < C0_1030) {
1254 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1255 pcixcmd &= 0x8F;
1256 pci_write_config_byte(pdev, 0x6a, pcixcmd);
1257 }
1258 }
1259 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_1030_53C1035) {
1260 ioc->prod_name = "LSI53C1035";
1261 ioc->bus_type = SCSI;
1262 }
1263
1264 sprintf(ioc->name, "ioc%d", ioc->id);
1265
1266 spin_lock_init(&ioc->FreeQlock);
1267
1268 /* Disable all! */
1269 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
1270 ioc->active = 0;
1271 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1272
1273 /* Set lookup ptr. */
1274 list_add_tail(&ioc->list, &ioc_list);
1275
1276 ioc->pci_irq = -1;
1277 if (pdev->irq) {
1278 r = request_irq(pdev->irq, mpt_interrupt, SA_SHIRQ, ioc->name, ioc);
1279
1280 if (r < 0) {
1281#ifndef __sparc__
1282 printk(MYIOC_s_ERR_FMT "Unable to allocate interrupt %d!\n",
1283 ioc->name, pdev->irq);
1284#else
1285 printk(MYIOC_s_ERR_FMT "Unable to allocate interrupt %s!\n",
1286 ioc->name, __irq_itoa(pdev->irq));
1287#endif
1288 list_del(&ioc->list);
1289 iounmap(mem);
1290 kfree(ioc);
1291 return -EBUSY;
1292 }
1293
1294 ioc->pci_irq = pdev->irq;
1295
1296 pci_set_master(pdev); /* ?? */
1297 pci_set_drvdata(pdev, ioc);
1298
1299#ifndef __sparc__
1300 dprintk((KERN_INFO MYNAM ": %s installed at interrupt %d\n", ioc->name, pdev->irq));
1301#else
1302 dprintk((KERN_INFO MYNAM ": %s installed at interrupt %s\n", ioc->name, __irq_itoa(pdev->irq)));
1303#endif
1304 }
1305
1306 /* NEW! 20010220 -sralston
1307 * Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets.
1308 */
1309 mpt_detect_bound_ports(ioc, pdev);
1310
1311 if ((r = mpt_do_ioc_recovery(ioc,
1312 MPT_HOSTEVENT_IOC_BRINGUP, CAN_SLEEP)) != 0) {
1313 printk(KERN_WARNING MYNAM
1314 ": WARNING - %s did not initialize properly! (%d)\n",
1315 ioc->name, r);
1316
1317 list_del(&ioc->list);
1318 free_irq(ioc->pci_irq, ioc);
1319 iounmap(mem);
1320 kfree(ioc);
1321 pci_set_drvdata(pdev, NULL);
1322 return r;
1323 }
1324
1325 /* call per device driver probe entry point */
1326 for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
1327 if(MptDeviceDriverHandlers[ii] &&
1328 MptDeviceDriverHandlers[ii]->probe) {
1329 MptDeviceDriverHandlers[ii]->probe(pdev,id);
1330 }
1331 }
1332
1333#ifdef CONFIG_PROC_FS
1334 /*
1335 * Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter.
1336 */
1337 dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
1338 if (dent) {
1339 ent = create_proc_entry("info", S_IFREG|S_IRUGO, dent);
1340 if (ent) {
1341 ent->read_proc = procmpt_iocinfo_read;
1342 ent->data = ioc;
1343 }
1344 ent = create_proc_entry("summary", S_IFREG|S_IRUGO, dent);
1345 if (ent) {
1346 ent->read_proc = procmpt_summary_read;
1347 ent->data = ioc;
1348 }
1349 }
1350#endif
1351
1352 return 0;
1353}
1354
1355/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1356/*
1357 * mptbase_remove - Remove a PCI intelligent MPT adapter.
1358 * @pdev: Pointer to pci_dev structure
1359 *
1360 */
1361
1362static void __devexit
1363mptbase_remove(struct pci_dev *pdev)
1364{
1365 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1366 char pname[32];
1367 int ii;
1368
1369 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
1370 remove_proc_entry(pname, NULL);
1371 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
1372 remove_proc_entry(pname, NULL);
1373 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
1374 remove_proc_entry(pname, NULL);
1375
1376 /* call per device driver remove entry point */
1377 for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
1378 if(MptDeviceDriverHandlers[ii] &&
1379 MptDeviceDriverHandlers[ii]->remove) {
1380 MptDeviceDriverHandlers[ii]->remove(pdev);
1381 }
1382 }
1383
1384 /* Disable interrupts! */
1385 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
1386
1387 ioc->active = 0;
1388 synchronize_irq(pdev->irq);
1389
1390 /* Clear any lingering interrupt */
1391 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1392
1393 CHIPREG_READ32(&ioc->chip->IntStatus);
1394
1395 mpt_adapter_dispose(ioc);
1396
1397 pci_set_drvdata(pdev, NULL);
1398}
1399
1400/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1401/*
1402 *	mptbase_shutdown - Call each registered protocol driver's shutdown handler.
1403 *	@dev: Pointer to the generic device structure
1404 */
1405static void
1406mptbase_shutdown(struct device * dev)
1407{
1408 int ii;
1409
1410 /* call per device driver shutdown entry point */
1411 for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
1412 if(MptDeviceDriverHandlers[ii] &&
1413 MptDeviceDriverHandlers[ii]->shutdown) {
1414 MptDeviceDriverHandlers[ii]->shutdown(dev);
1415 }
1416 }
1417
1418}
1419
1420
1421/**************************************************************************
1422 * Power Management
1423 */
1424#ifdef CONFIG_PM
1425/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1426/*
1427 * mptbase_suspend - Fusion MPT base driver suspend routine.
1428 *
1429 *
1430 */
1431static int
1432mptbase_suspend(struct pci_dev *pdev, u32 state)
1433{
1434 u32 device_state;
1435 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1436 int ii;
1437
1438 switch(state)
1439 {
1440 case 1: /* S1 */
1441 device_state=1; /* D1 */;
1442 break;
1443 case 3: /* S3 */
1444 case 4: /* S4 */
1445 device_state=3; /* D3 */;
1446 break;
1447 default:
1448 return -EAGAIN /*FIXME*/;
1449 break;
1450 }
1451
1452 printk(MYIOC_s_INFO_FMT
1453 "pci-suspend: pdev=0x%p, slot=%s, Entering operating state [D%d]\n",
1454 ioc->name, pdev, pci_name(pdev), device_state);
1455
1456 /* call per device driver suspend entry point */
1457 for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
1458 if(MptDeviceDriverHandlers[ii] &&
1459 MptDeviceDriverHandlers[ii]->suspend) {
1460 MptDeviceDriverHandlers[ii]->suspend(pdev, state);
1461 }
1462 }
1463
1464 pci_save_state(pdev);
1465
1466 /* put ioc into READY_STATE */
1467 if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
1468 printk(MYIOC_s_ERR_FMT
1469 "pci-suspend: IOC msg unit reset failed!\n", ioc->name);
1470 }
1471
1472 /* disable interrupts */
1473 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
1474 ioc->active = 0;
1475
1476 /* Clear any lingering interrupt */
1477 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1478
1479 pci_disable_device(pdev);
1480 pci_set_power_state(pdev, device_state);
1481
1482 return 0;
1483}
1484
1485/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1486/*
1487 * mptbase_resume - Fusion MPT base driver resume routine.
1488 *
1489 *
1490 */
1491static int
1492mptbase_resume(struct pci_dev *pdev)
1493{
1494 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1495 u32 device_state = pdev->current_state;
1496 int recovery_state;
1497 int ii;
1498
1499 printk(MYIOC_s_INFO_FMT
1500 "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
1501 ioc->name, pdev, pci_name(pdev), device_state);
1502
1503 pci_set_power_state(pdev, 0);
1504 pci_restore_state(pdev);
1505 pci_enable_device(pdev);
1506
1507 /* enable interrupts */
1508 CHIPREG_WRITE32(&ioc->chip->IntMask, ~(MPI_HIM_RIM));
1509 ioc->active = 1;
1510
1511 /* F/W not running */
1512 if(!CHIPREG_READ32(&ioc->chip->Doorbell)) {
1513 /* enable domain validation flags */
1514 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
1515 ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_NEED_DV;
1516 }
1517 }
1518
1519 printk(MYIOC_s_INFO_FMT
1520 "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
1521 ioc->name,
1522 (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
1523 CHIPREG_READ32(&ioc->chip->Doorbell));
1524
1525 /* bring ioc to operational state */
1526 if ((recovery_state = mpt_do_ioc_recovery(ioc,
1527 MPT_HOSTEVENT_IOC_RECOVER, CAN_SLEEP)) != 0) {
1528 printk(MYIOC_s_INFO_FMT
1529 "pci-resume: Cannot recover, error:[%x]\n",
1530 ioc->name, recovery_state);
1531 } else {
1532 printk(MYIOC_s_INFO_FMT
1533 "pci-resume: success\n", ioc->name);
1534 }
1535
1536 /* call per device driver resume entry point */
1537 for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
1538 if(MptDeviceDriverHandlers[ii] &&
1539 MptDeviceDriverHandlers[ii]->resume) {
1540 MptDeviceDriverHandlers[ii]->resume(pdev);
1541 }
1542 }
1543
1544 return 0;
1545}
1546#endif
1547
1548/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1549/*
1550 * mpt_do_ioc_recovery - Initialize or recover MPT adapter.
1551 * @ioc: Pointer to MPT adapter structure
1552 * @reason: Event word / reason
1553 * @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
1554 *
1555 * This routine performs all the steps necessary to bring the IOC
1556 *	to an OPERATIONAL state.
1557 *
1558 * This routine also pre-fetches the LAN MAC address of a Fibre Channel
1559 * MPT adapter.
1560 *
1561 * Returns:
1562 * 0 for success
1563 * -1 if failed to get board READY
1564 * -2 if READY but IOCFacts Failed
1565 * -3 if READY but PrimeIOCFifos Failed
1566 * -4 if READY but IOCInit Failed
1567 */
1568static int
1569mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1570{
1571 int hard_reset_done = 0;
1572 int alt_ioc_ready = 0;
1573 int hard;
1574 int rc=0;
1575 int ii;
1576 int handlers;
1577 int ret = 0;
1578 int reset_alt_ioc_active = 0;
1579
1580 printk(KERN_INFO MYNAM ": Initiating %s %s\n",
1581 ioc->name, reason==MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery");
1582
1583 /* Disable reply interrupts (also blocks FreeQ) */
1584 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
1585 ioc->active = 0;
1586
1587 if (ioc->alt_ioc) {
1588 if (ioc->alt_ioc->active)
1589 reset_alt_ioc_active = 1;
1590
1591 /* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */
1592 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF);
1593 ioc->alt_ioc->active = 0;
1594 }
1595
1596 hard = 1;
1597 if (reason == MPT_HOSTEVENT_IOC_BRINGUP)
1598 hard = 0;
1599
1600 if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) {
1601 if (hard_reset_done == -4) {
1602 printk(KERN_WARNING MYNAM ": %s Owned by PEER..skipping!\n",
1603 ioc->name);
1604
1605 if (reset_alt_ioc_active && ioc->alt_ioc) {
1606 /* (re)Enable alt-IOC! (reply interrupt, FreeQ) */
1607 dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n",
1608 ioc->alt_ioc->name));
1609 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM));
1610 ioc->alt_ioc->active = 1;
1611 }
1612
1613 } else {
1614 printk(KERN_WARNING MYNAM ": %s NOT READY WARNING!\n",
1615 ioc->name);
1616 }
1617 return -1;
1618 }
1619
1620 /* hard_reset_done = 0 if a soft reset was performed
1621 * and 1 if a hard reset was performed.
1622 */
1623 if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) {
1624 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
1625 alt_ioc_ready = 1;
1626 else
1627 printk(KERN_WARNING MYNAM
1628 ": alt-%s: Not ready WARNING!\n",
1629 ioc->alt_ioc->name);
1630 }
1631
1632 for (ii=0; ii<5; ii++) {
1633 /* Get IOC facts! Allow 5 retries */
1634 if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
1635 break;
1636 }
1637
1638
1639 if (ii == 5) {
1640 dinitprintk((MYIOC_s_INFO_FMT "Retry IocFacts failed rc=%x\n", ioc->name, rc));
1641 ret = -2;
1642 } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
1643 MptDisplayIocCapabilities(ioc);
1644 }
1645
1646 if (alt_ioc_ready) {
1647 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
1648 dinitprintk((MYIOC_s_INFO_FMT "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc));
1649 /* Retry - alt IOC was initialized once
1650 */
1651 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
1652 }
1653 if (rc) {
1654 dinitprintk((MYIOC_s_INFO_FMT "Retry Alt IocFacts failed rc=%x\n", ioc->name, rc));
1655 alt_ioc_ready = 0;
1656 reset_alt_ioc_active = 0;
1657 } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
1658 MptDisplayIocCapabilities(ioc->alt_ioc);
1659 }
1660 }
1661
1662 /* Prime reply & request queues!
1663 * (mucho alloc's) Must be done prior to
1664 * init as upper addresses are needed for init.
1665 * If fails, continue with alt-ioc processing
1666 */
1667 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
1668 ret = -3;
1669
1670 /* May need to check/upload firmware & data here!
1671 * If fails, continue with alt-ioc processing
1672 */
1673 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
1674 ret = -4;
1675// NEW!
1676 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
1677 printk(KERN_WARNING MYNAM ": alt-%s: (%d) FIFO mgmt alloc WARNING!\n",
1678 ioc->alt_ioc->name, rc);
1679 alt_ioc_ready = 0;
1680 reset_alt_ioc_active = 0;
1681 }
1682
1683 if (alt_ioc_ready) {
1684 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
1685 alt_ioc_ready = 0;
1686 reset_alt_ioc_active = 0;
1687 printk(KERN_WARNING MYNAM
1688 ": alt-%s: (%d) init failure WARNING!\n",
1689 ioc->alt_ioc->name, rc);
1690 }
1691 }
1692
1693 if (reason == MPT_HOSTEVENT_IOC_BRINGUP){
1694 if (ioc->upload_fw) {
1695 ddlprintk((MYIOC_s_INFO_FMT
1696 "firmware upload required!\n", ioc->name));
1697
1698			/* Only attempt the upload if bring-up succeeded so far
1699			 */
1700 if (ret == 0) {
1701 rc = mpt_do_upload(ioc, sleepFlag);
1702 if (rc != 0)
1703 printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
1704 }
1705 }
1706 }
1707
1708 if (ret == 0) {
1709 /* Enable! (reply interrupt) */
1710 CHIPREG_WRITE32(&ioc->chip->IntMask, ~(MPI_HIM_RIM));
1711 ioc->active = 1;
1712 }
1713
1714 if (reset_alt_ioc_active && ioc->alt_ioc) {
1715 /* (re)Enable alt-IOC! (reply interrupt) */
1716 dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n",
1717 ioc->alt_ioc->name));
1718 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM));
1719 ioc->alt_ioc->active = 1;
1720 }
1721
1722 /* NEW! 20010120 -sralston
1723 * Enable MPT base driver management of EventNotification
1724 * and EventAck handling.
1725 */
1726 if ((ret == 0) && (!ioc->facts.EventState))
1727 (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */
1728
1729 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
1730 (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */
1731
1732 /* (Bugzilla:fibrebugs, #513)
1733 * Bug fix (part 2)! 20010905 -sralston
1734 * Add additional "reason" check before call to GetLanConfigPages
1735 * (combined with GetIoUnitPage2 call). This prevents a somewhat
1736 * recursive scenario; GetLanConfigPages times out, timer expired
1737 * routine calls HardResetHandler, which calls into here again,
1738 * and we try GetLanConfigPages again...
1739 */
1740 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
1741 if (ioc->bus_type == FC) {
1742 /*
1743 * Pre-fetch FC port WWN and stuff...
1744 * (FCPortPage0_t stuff)
1745 */
1746 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
1747 (void) GetFcPortPage0(ioc, ii);
1748 }
1749
1750 if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) &&
1751 (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
1752 /*
1753 * Pre-fetch the ports LAN MAC address!
1754 * (LANPage1_t stuff)
1755 */
1756 (void) GetLanConfigPages(ioc);
1757#ifdef MPT_DEBUG
1758 {
1759 u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
1760 dprintk((MYIOC_s_INFO_FMT "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1761 ioc->name, a[5], a[4], a[3], a[2], a[1], a[0] ));
1762 }
1763#endif
1764 }
1765 } else {
1766 /* Get NVRAM and adapter maximums from SPP 0 and 2
1767 */
1768 mpt_GetScsiPortSettings(ioc, 0);
1769
1770 /* Get version and length of SDP 1
1771 */
1772 mpt_readScsiDevicePageHeaders(ioc, 0);
1773
1774 /* Find IM volumes
1775 */
1776 if (ioc->facts.MsgVersion >= 0x0102)
1777 mpt_findImVolumes(ioc);
1778
1779 /* Check, and possibly reset, the coalescing value
1780 */
1781 mpt_read_ioc_pg_1(ioc);
1782
1783 mpt_read_ioc_pg_4(ioc);
1784 }
1785
1786 GetIoUnitPage2(ioc);
1787 }
1788
1789 /*
1790 * Call each currently registered protocol IOC reset handler
1791 * with post-reset indication.
1792 * NOTE: If we're doing _IOC_BRINGUP, there can be no
1793 * MptResetHandlers[] registered yet.
1794 */
1795 if (hard_reset_done) {
1796 rc = handlers = 0;
1797 for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
1798 if ((ret == 0) && MptResetHandlers[ii]) {
1799 dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n",
1800 ioc->name, ii));
1801 rc += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_POST_RESET);
1802 handlers++;
1803 }
1804
1805 if (alt_ioc_ready && MptResetHandlers[ii]) {
1806 dprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n",
1807 ioc->name, ioc->alt_ioc->name, ii));
1808 rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET);
1809 handlers++;
1810 }
1811 }
1812 /* FIXME? Examine results here? */
1813 }
1814
1815 return ret;
1816}
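/*
 * For reference (illustrative sketch): the MptResetHandlers[] slots invoked
 * above are filled in by protocol drivers through mpt_reset_register(),
 * declared in mptbase.h.  A handler is handed the adapter and a reset phase
 * and quiesces or restarts its own traffic accordingly
 * (my_ioc_reset/my_cb_idx are placeholders):
 *
 *	static int
 *	my_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 *	{
 *		if (reset_phase == MPT_IOC_PRE_RESET)
 *			... stop queuing new requests, flush pending ones ...
 *		else if (reset_phase == MPT_IOC_POST_RESET)
 *			... re-initialize per-adapter state, resume I/O ...
 *		return 1;
 *	}
 *
 *	mpt_reset_register(my_cb_idx, my_ioc_reset);
 */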
1817
1818/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1819/*
1820 *	mpt_detect_bound_ports - Search ioc_list for an already-registered adapter
1821 *	whose PCI bus/dev_function matches this one's dev_function +/-1, as is the
1822 *	case for the two functions of a newly discovered 929, 929X, 1030 or 1035.
1823 * @ioc: Pointer to MPT adapter structure
1824 * @pdev: Pointer to (struct pci_dev) structure
1825 *
1826 * If match on PCI dev_function +/-1 is found, bind the two MPT adapters
1827 * using alt_ioc pointer fields in their %MPT_ADAPTER structures.
1828 */
1829static void
1830mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
1831{
1832 unsigned int match_lo, match_hi;
1833 MPT_ADAPTER *ioc_srch;
1834
1835 match_lo = pdev->devfn-1;
1836 match_hi = pdev->devfn+1;
1837 dprintk((MYIOC_s_INFO_FMT "PCI bus/devfn=%x/%x, searching for devfn match on %x or %x\n",
1838 ioc->name, pdev->bus->number, pdev->devfn, match_lo, match_hi));
1839
1840 list_for_each_entry(ioc_srch, &ioc_list, list) {
1841 struct pci_dev *_pcidev = ioc_srch->pcidev;
1842
1843 if ((_pcidev->device == pdev->device) &&
1844 (_pcidev->bus->number == pdev->bus->number) &&
1845 (_pcidev->devfn == match_lo || _pcidev->devfn == match_hi) ) {
1846 /* Paranoia checks */
1847 if (ioc->alt_ioc != NULL) {
1848 printk(KERN_WARNING MYNAM ": Oops, already bound (%s <==> %s)!\n",
1849 ioc->name, ioc->alt_ioc->name);
1850 break;
1851 } else if (ioc_srch->alt_ioc != NULL) {
1852 printk(KERN_WARNING MYNAM ": Oops, already bound (%s <==> %s)!\n",
1853 ioc_srch->name, ioc_srch->alt_ioc->name);
1854 break;
1855 }
1856 dprintk((KERN_INFO MYNAM ": FOUND! binding %s <==> %s\n",
1857 ioc->name, ioc_srch->name));
1858 ioc_srch->alt_ioc = ioc;
1859 ioc->alt_ioc = ioc_srch;
1860 break;
1861 }
1862 }
1863}
1864
1865/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1866/*
1867 * mpt_adapter_disable - Disable misbehaving MPT adapter.
1868 *	@ioc: Pointer to MPT adapter structure
1869 */
1870static void
1871mpt_adapter_disable(MPT_ADAPTER *ioc)
1872{
1873 int sz;
1874 int ret;
1875
1876 if (ioc->cached_fw != NULL) {
1877 ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n"));
1878 if ((ret = mpt_downloadboot(ioc, NO_SLEEP)) < 0) {
1879 printk(KERN_WARNING MYNAM
1880 ": firmware downloadboot failure (%d)!\n", ret);
1881 }
1882 }
1883
1884 /* Disable adapter interrupts! */
1885 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
1886 ioc->active = 0;
1887 /* Clear any lingering interrupt */
1888 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1889
1890 if (ioc->alloc != NULL) {
1891 sz = ioc->alloc_sz;
1892 dexitprintk((KERN_INFO MYNAM ": %s.free @ %p, sz=%d bytes\n",
1893 ioc->name, ioc->alloc, ioc->alloc_sz));
1894 pci_free_consistent(ioc->pcidev, sz,
1895 ioc->alloc, ioc->alloc_dma);
1896 ioc->reply_frames = NULL;
1897 ioc->req_frames = NULL;
1898 ioc->alloc = NULL;
1899 ioc->alloc_total -= sz;
1900 }
1901
1902 if (ioc->sense_buf_pool != NULL) {
1903 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
1904 pci_free_consistent(ioc->pcidev, sz,
1905 ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
1906 ioc->sense_buf_pool = NULL;
1907 ioc->alloc_total -= sz;
1908 }
1909
1910 if (ioc->events != NULL){
1911 sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
1912 kfree(ioc->events);
1913 ioc->events = NULL;
1914 ioc->alloc_total -= sz;
1915 }
1916
1917 if (ioc->cached_fw != NULL) {
1918 sz = ioc->facts.FWImageSize;
1919 pci_free_consistent(ioc->pcidev, sz,
1920 ioc->cached_fw, ioc->cached_fw_dma);
1921 ioc->cached_fw = NULL;
1922 ioc->alloc_total -= sz;
1923 }
1924
1925 if (ioc->spi_data.nvram != NULL) {
1926 kfree(ioc->spi_data.nvram);
1927 ioc->spi_data.nvram = NULL;
1928 }
1929
1930 if (ioc->spi_data.pIocPg3 != NULL) {
1931 kfree(ioc->spi_data.pIocPg3);
1932 ioc->spi_data.pIocPg3 = NULL;
1933 }
1934
1935 if (ioc->spi_data.pIocPg4 != NULL) {
1936 sz = ioc->spi_data.IocPg4Sz;
1937 pci_free_consistent(ioc->pcidev, sz,
1938 ioc->spi_data.pIocPg4,
1939 ioc->spi_data.IocPg4_dma);
1940 ioc->spi_data.pIocPg4 = NULL;
1941 ioc->alloc_total -= sz;
1942 }
1943
1944 if (ioc->ReqToChain != NULL) {
1945 kfree(ioc->ReqToChain);
1946 kfree(ioc->RequestNB);
1947 ioc->ReqToChain = NULL;
1948 }
1949
1950 if (ioc->ChainToChain != NULL) {
1951 kfree(ioc->ChainToChain);
1952 ioc->ChainToChain = NULL;
1953 }
1954}
1955
1956/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1957/*
1958 * mpt_adapter_dispose - Free all resources associated with a MPT
1959 * adapter.
1960 * @ioc: Pointer to MPT adapter structure
1961 *
1962 * This routine unregisters h/w resources and frees all alloc'd memory
1963 * associated with a MPT adapter structure.
1964 */
1965static void
1966mpt_adapter_dispose(MPT_ADAPTER *ioc)
1967{
1968 if (ioc != NULL) {
1969 int sz_first, sz_last;
1970
1971 sz_first = ioc->alloc_total;
1972
1973 mpt_adapter_disable(ioc);
1974
1975 if (ioc->pci_irq != -1) {
1976 free_irq(ioc->pci_irq, ioc);
1977 ioc->pci_irq = -1;
1978 }
1979
1980 if (ioc->memmap != NULL)
1981 iounmap(ioc->memmap);
1982
1983#if defined(CONFIG_MTRR) && 0
1984 if (ioc->mtrr_reg > 0) {
1985 mtrr_del(ioc->mtrr_reg, 0, 0);
1986 dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", ioc->name));
1987 }
1988#endif
1989
1990 /* Zap the adapter lookup ptr! */
1991 list_del(&ioc->list);
1992
1993 sz_last = ioc->alloc_total;
1994 dprintk((KERN_INFO MYNAM ": %s: free'd %d of %d bytes\n",
1995 ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
1996 kfree(ioc);
1997 }
1998}
1999
2000/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2001/*
2002 *	MptDisplayIocCapabilities - Display IOC's capabilities.
2003 * @ioc: Pointer to MPT adapter structure
2004 */
2005static void
2006MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
2007{
2008 int i = 0;
2009
2010 printk(KERN_INFO "%s: ", ioc->name);
2011 if (ioc->prod_name && strlen(ioc->prod_name) > 3)
2012 printk("%s: ", ioc->prod_name+3);
2013 printk("Capabilities={");
2014
2015 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2016 printk("Initiator");
2017 i++;
2018 }
2019
2020 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2021 printk("%sTarget", i ? "," : "");
2022 i++;
2023 }
2024
2025 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
2026 printk("%sLAN", i ? "," : "");
2027 i++;
2028 }
2029
2030#if 0
2031 /*
2032 * This would probably evoke more questions than it's worth
2033 */
2034 if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2035 printk("%sLogBusAddr", i ? "," : "");
2036 i++;
2037 }
2038#endif
2039
2040 printk("}\n");
2041}
2042
2043/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2044/*
2045 * MakeIocReady - Get IOC to a READY state, using KickStart if needed.
2046 * @ioc: Pointer to MPT_ADAPTER structure
2047 * @force: Force hard KickStart of IOC
2048 * @sleepFlag: Specifies whether the process can sleep
2049 *
2050 * Returns:
2051 * 1 - DIAG reset and READY
2052 * 0 - READY initially OR soft reset and READY
2053 * -1 - Any failure on KickStart
2054 * -2 - Msg Unit Reset Failed
2055 * -3 - IO Unit Reset Failed
2056 * -4 - IOC owned by a PEER
2057 */
2058static int
2059MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2060{
2061 u32 ioc_state;
2062 int statefault = 0;
2063 int cntdn;
2064 int hard_reset_done = 0;
2065 int r;
2066 int ii;
2067 int whoinit;
2068
2069 /* Get current [raw] IOC state */
2070 ioc_state = mpt_GetIocState(ioc, 0);
2071 dhsprintk((KERN_INFO MYNAM "::MakeIocReady, %s [raw] state=%08x\n", ioc->name, ioc_state));
2072
2073 /*
2074 * Check to see if IOC got left/stuck in doorbell handshake
2075 * grip of death. If so, hard reset the IOC.
2076 */
2077 if (ioc_state & MPI_DOORBELL_ACTIVE) {
2078 statefault = 1;
2079 printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n",
2080 ioc->name);
2081 }
2082
2083 /* Is it already READY? */
2084 if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)
2085 return 0;
2086
2087 /*
2088 * Check to see if IOC is in FAULT state.
2089 */
2090 if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
2091 statefault = 2;
2092 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n",
2093 ioc->name);
2094 printk(KERN_WARNING " FAULT code = %04xh\n",
2095 ioc_state & MPI_DOORBELL_DATA_MASK);
2096 }
2097
2098 /*
2099 * Hmmm... Did it get left operational?
2100 */
2101 if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
2102 dinitprintk((MYIOC_s_WARN_FMT "IOC operational unexpected\n",
2103 ioc->name));
2104
2105 /* Check WhoInit.
2106 * If PCI Peer, exit.
2107 * Else, if no fault conditions are present, issue a MessageUnitReset
2108 * Else, fall through to KickStart case
2109 */
2110 whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
2111 dprintk((KERN_WARNING MYNAM
2112			": whoinit 0x%x statefault %d force %d\n",
2113 whoinit, statefault, force));
2114 if (whoinit == MPI_WHOINIT_PCI_PEER)
2115 return -4;
2116 else {
2117 if ((statefault == 0 ) && (force == 0)) {
2118 if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0)
2119 return 0;
2120 }
2121 statefault = 3;
2122 }
2123 }
2124
2125 hard_reset_done = KickStart(ioc, statefault||force, sleepFlag);
2126 if (hard_reset_done < 0)
2127 return -1;
2128
2129 /*
2130 * Loop here waiting for IOC to come READY.
2131 */
2132 ii = 0;
2133 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */
2134
2135 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
2136 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
2137 /*
2138 * BIOS or previous driver load left IOC in OP state.
2139 * Reset messaging FIFOs.
2140 */
2141 if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) {
2142 printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name);
2143 return -2;
2144 }
2145 } else if (ioc_state == MPI_IOC_STATE_RESET) {
2146 /*
2147 * Something is wrong. Try to get IOC back
2148 * to a known state.
2149 */
2150 if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) {
2151 printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name);
2152 return -3;
2153 }
2154 }
2155
2156 ii++; cntdn--;
2157 if (!cntdn) {
2158 printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n",
2159 ioc->name, (int)((ii+5)/HZ));
2160 return -ETIME;
2161 }
2162
2163 if (sleepFlag == CAN_SLEEP) {
2164 msleep_interruptible(1);
2165 } else {
2166 mdelay (1); /* 1 msec delay */
2167 }
2168
2169 }
2170
2171 if (statefault < 3) {
2172 printk(MYIOC_s_INFO_FMT "Recovered from %s\n",
2173 ioc->name,
2174 statefault==1 ? "stuck handshake" : "IOC FAULT");
2175 }
2176
2177 return hard_reset_done;
2178}
2179
2180/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2181/*
2182 * mpt_GetIocState - Get the current state of a MPT adapter.
2183 * @ioc: Pointer to MPT_ADAPTER structure
2184 * @cooked: Request raw or cooked IOC state
2185 *
2186 * Returns all IOC Doorbell register bits if cooked==0, else just the
2187 * Doorbell bits in MPI_IOC_STATE_MASK.
2188 */
2189u32
2190mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
2191{
2192 u32 s, sc;
2193
2194 /* Get! */
2195 s = CHIPREG_READ32(&ioc->chip->Doorbell);
2196// dprintk((MYIOC_s_INFO_FMT "raw state = %08x\n", ioc->name, s));
2197 sc = s & MPI_IOC_STATE_MASK;
2198
2199 /* Save! */
2200 ioc->last_state = sc;
2201
2202 return cooked ? sc : s;
2203}
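/*
 * Usage sketch (illustrative): callers that only need the state field ask
 * for the cooked value; callers that also want WhoInit or the fault code
 * read the raw doorbell and mask it themselves, as MakeIocReady() above
 * does (fault_code is a placeholder for wherever the caller records it):
 *
 *	if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL)
 *		... adapter is not accepting normal requests ...
 *
 *	u32 raw = mpt_GetIocState(ioc, 0);
 *	if ((raw & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
 *		fault_code = raw & MPI_DOORBELL_DATA_MASK;
 */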
2204
2205/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2206/*
2207 * GetIocFacts - Send IOCFacts request to MPT adapter.
2208 * @ioc: Pointer to MPT_ADAPTER structure
2209 * @sleepFlag: Specifies whether the process can sleep
2210 * @reason: If recovery, only update facts.
2211 *
2212 * Returns 0 for success, non-zero for failure.
2213 */
2214static int
2215GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2216{
2217 IOCFacts_t get_facts;
2218 IOCFactsReply_t *facts;
2219 int r;
2220 int req_sz;
2221 int reply_sz;
2222 int sz;
2223 u32 status, vv;
2224 u8 shiftFactor=1;
2225
2226 /* IOC *must* NOT be in RESET state! */
2227 if (ioc->last_state == MPI_IOC_STATE_RESET) {
2228 printk(KERN_ERR MYNAM ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
2229 ioc->name,
2230 ioc->last_state );
2231 return -44;
2232 }
2233
2234 facts = &ioc->facts;
2235
2236 /* Destination (reply area)... */
2237 reply_sz = sizeof(*facts);
2238 memset(facts, 0, reply_sz);
2239
2240 /* Request area (get_facts on the stack right now!) */
2241 req_sz = sizeof(get_facts);
2242 memset(&get_facts, 0, req_sz);
2243
2244 get_facts.Function = MPI_FUNCTION_IOC_FACTS;
2245 /* Assert: All other get_facts fields are zero! */
2246
2247 dinitprintk((MYIOC_s_INFO_FMT
2248 "Sending get IocFacts request req_sz=%d reply_sz=%d\n",
2249 ioc->name, req_sz, reply_sz));
2250
2251 /* No non-zero fields in the get_facts request are greater than
2252 * 1 byte in size, so we can just fire it off as is.
2253 */
2254 r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts,
2255 reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag);
2256 if (r != 0)
2257 return r;
2258
2259 /*
2260 * Now byte swap (GRRR) the necessary fields before any further
2261 * inspection of reply contents.
2262 *
2263 * But need to do some sanity checks on MsgLength (byte) field
2264 * to make sure we don't zero IOC's req_sz!
2265 */
2266 /* Did we get a valid reply? */
2267 if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) {
2268 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
2269 /*
2270			 * If we have not been here before, save off the first WhoInit value
2271 */
2272 if (ioc->FirstWhoInit == WHOINIT_UNKNOWN)
2273 ioc->FirstWhoInit = facts->WhoInit;
2274 }
2275
2276 facts->MsgVersion = le16_to_cpu(facts->MsgVersion);
2277 facts->MsgContext = le32_to_cpu(facts->MsgContext);
2278 facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
2279 facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
2280 facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
2281 status = facts->IOCStatus & MPI_IOCSTATUS_MASK;
2282 /* CHECKME! IOCStatus, IOCLogInfo */
2283
2284 facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
2285 facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize);
2286
2287 /*
2288 * FC f/w version changed between 1.1 and 1.2
2289 * Old: u16{Major(4),Minor(4),SubMinor(8)}
2290 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
2291 */
2292 if (facts->MsgVersion < 0x0102) {
2293 /*
2294 * Handle old FC f/w style, convert to new...
2295 */
2296 u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion);
2297 facts->FWVersion.Word =
2298 ((oldv<<12) & 0xFF000000) |
2299 ((oldv<<8) & 0x000FFF00);
2300 } else
2301 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
2302
2303 facts->ProductID = le16_to_cpu(facts->ProductID);
2304 facts->CurrentHostMfaHighAddr =
2305 le32_to_cpu(facts->CurrentHostMfaHighAddr);
2306 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
2307 facts->CurrentSenseBufferHighAddr =
2308 le32_to_cpu(facts->CurrentSenseBufferHighAddr);
2309 facts->CurReplyFrameSize =
2310 le16_to_cpu(facts->CurReplyFrameSize);
2311
2312 /*
2313 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
2314 * Older MPI-1.00.xx struct had 13 dwords, and enlarged
2315 * to 14 in MPI-1.01.0x.
2316 */
2317 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
2318 facts->MsgVersion > 0x0100) {
2319 facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
2320 }
2321
2322 sz = facts->FWImageSize;
2323 if ( sz & 0x01 )
2324 sz += 1;
2325 if ( sz & 0x02 )
2326 sz += 2;
2327 facts->FWImageSize = sz;
2328
2329 if (!facts->RequestFrameSize) {
2330 /* Something is wrong! */
2331 printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
2332 ioc->name);
2333 return -55;
2334 }
2335
2336 r = sz = le32_to_cpu(facts->BlockSize);
2337 vv = ((63 / (sz * 4)) + 1) & 0x03;
2338 ioc->NB_for_64_byte_frame = vv;
2339 while ( sz )
2340 {
2341 shiftFactor++;
2342 sz = sz >> 1;
2343 }
2344 ioc->NBShiftFactor = shiftFactor;
2345 dinitprintk((MYIOC_s_INFO_FMT "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
2346 ioc->name, vv, shiftFactor, r));
2347
2348 if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
2349 /*
2350 * Set values for this IOC's request & reply frame sizes,
2351 * and request & reply queue depths...
2352 */
2353 ioc->req_sz = min(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4);
2354 ioc->req_depth = min_t(int, MPT_MAX_REQ_DEPTH, facts->GlobalCredits);
2355 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
2356 ioc->reply_depth = min_t(int, MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth);
2357
2358 dinitprintk((MYIOC_s_INFO_FMT "reply_sz=%3d, reply_depth=%4d\n",
2359 ioc->name, ioc->reply_sz, ioc->reply_depth));
2360 dinitprintk((MYIOC_s_INFO_FMT "req_sz =%3d, req_depth =%4d\n",
2361 ioc->name, ioc->req_sz, ioc->req_depth));
2362
2363 /* Get port facts! */
2364 if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 )
2365 return r;
2366 }
2367 } else {
2368 printk(MYIOC_s_ERR_FMT
2369 "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
2370 ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
2371 RequestFrameSize)/sizeof(u32)));
2372 return -66;
2373 }
2374
2375 return 0;
2376}
2377
2378/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2379/*
2380 * GetPortFacts - Send PortFacts request to MPT adapter.
2381 * @ioc: Pointer to MPT_ADAPTER structure
2382 * @portnum: Port number
2383 * @sleepFlag: Specifies whether the process can sleep
2384 *
2385 * Returns 0 for success, non-zero for failure.
2386 */
2387static int
2388GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
2389{
2390 PortFacts_t get_pfacts;
2391 PortFactsReply_t *pfacts;
2392 int ii;
2393 int req_sz;
2394 int reply_sz;
2395
2396 /* IOC *must* NOT be in RESET state! */
2397 if (ioc->last_state == MPI_IOC_STATE_RESET) {
2398 printk(KERN_ERR MYNAM ": ERROR - Can't get PortFacts, %s NOT READY! (%08x)\n",
2399 ioc->name,
2400 ioc->last_state );
2401 return -4;
2402 }
2403
2404 pfacts = &ioc->pfacts[portnum];
2405
2406 /* Destination (reply area)... */
2407 reply_sz = sizeof(*pfacts);
2408 memset(pfacts, 0, reply_sz);
2409
2410 /* Request area (get_pfacts on the stack right now!) */
2411 req_sz = sizeof(get_pfacts);
2412 memset(&get_pfacts, 0, req_sz);
2413
2414 get_pfacts.Function = MPI_FUNCTION_PORT_FACTS;
2415 get_pfacts.PortNumber = portnum;
2416 /* Assert: All other get_pfacts fields are zero! */
2417
2418 dinitprintk((MYIOC_s_INFO_FMT "Sending get PortFacts(%d) request\n",
2419 ioc->name, portnum));
2420
2421 /* No non-zero fields in the get_pfacts request are greater than
2422 * 1 byte in size, so we can just fire it off as is.
2423 */
2424 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts,
2425 reply_sz, (u16*)pfacts, 5 /*seconds*/, sleepFlag);
2426 if (ii != 0)
2427 return ii;
2428
2429 /* Did we get a valid reply? */
2430
2431 /* Now byte swap the necessary fields in the response. */
2432 pfacts->MsgContext = le32_to_cpu(pfacts->MsgContext);
2433 pfacts->IOCStatus = le16_to_cpu(pfacts->IOCStatus);
2434 pfacts->IOCLogInfo = le32_to_cpu(pfacts->IOCLogInfo);
2435 pfacts->MaxDevices = le16_to_cpu(pfacts->MaxDevices);
2436 pfacts->PortSCSIID = le16_to_cpu(pfacts->PortSCSIID);
2437 pfacts->ProtocolFlags = le16_to_cpu(pfacts->ProtocolFlags);
2438 pfacts->MaxPostedCmdBuffers = le16_to_cpu(pfacts->MaxPostedCmdBuffers);
2439 pfacts->MaxPersistentIDs = le16_to_cpu(pfacts->MaxPersistentIDs);
2440 pfacts->MaxLanBuckets = le16_to_cpu(pfacts->MaxLanBuckets);
2441
2442 return 0;
2443}
2444
2445/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2446/*
2447 * SendIocInit - Send IOCInit request to MPT adapter.
2448 * @ioc: Pointer to MPT_ADAPTER structure
2449 * @sleepFlag: Specifies whether the process can sleep
2450 *
2451 * Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state.
2452 *
2453 * Returns 0 for success, non-zero for failure.
2454 */
2455static int
2456SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2457{
2458 IOCInit_t ioc_init;
2459 MPIDefaultReply_t init_reply;
2460 u32 state;
2461 int r;
2462 int count;
2463 int cntdn;
2464
2465 memset(&ioc_init, 0, sizeof(ioc_init));
2466 memset(&init_reply, 0, sizeof(init_reply));
2467
2468 ioc_init.WhoInit = MPI_WHOINIT_HOST_DRIVER;
2469 ioc_init.Function = MPI_FUNCTION_IOC_INIT;
2470
2471 /* If we are in a recovery mode and we uploaded the FW image,
2472 * then this pointer is not NULL. Skip the upload a second time.
2473 * Set this flag if cached_fw set for either IOC.
2474 */
2475 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
2476 ioc->upload_fw = 1;
2477 else
2478 ioc->upload_fw = 0;
2479 ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n",
2480 ioc->name, ioc->upload_fw, ioc->facts.Flags));
2481
2482 if (ioc->bus_type == FC)
2483 ioc_init.MaxDevices = MPT_MAX_FC_DEVICES;
2484 else
2485 ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES;
2486
2487 ioc_init.MaxBuses = MPT_MAX_BUS;
2488
2489 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
2490
2491 if (sizeof(dma_addr_t) == sizeof(u64)) {
2492 /* Save the upper 32-bits of the request
2493 * (reply) and sense buffers.
2494 */
2495 ioc_init.HostMfaHighAddr = cpu_to_le32((u32)((u64)ioc->alloc_dma >> 32));
2496 ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
2497 } else {
2498 /* Force 32-bit addressing */
2499 ioc_init.HostMfaHighAddr = cpu_to_le32(0);
2500 ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
2501 }
2502
2503 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
2504 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
2505
2506 dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n",
2507 ioc->name, &ioc_init));
2508
2509 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
2510 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
2511 if (r != 0)
2512 return r;
2513
2514 /* No need to byte swap the multibyte fields in the reply
2515	 * since we don't even look at its contents.
2516 */
2517
2518 dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n",
2519 ioc->name, &ioc_init));
2520
2521 if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0)
2522 return r;
2523
2524 /* YIKES! SUPER IMPORTANT!!!
2525 * Poll IocState until _OPERATIONAL while IOC is doing
2526 * LoopInit and TargetDiscovery!
2527 */
2528 count = 0;
2529 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 60; /* 60 seconds */
2530 state = mpt_GetIocState(ioc, 1);
2531 while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) {
2532 if (sleepFlag == CAN_SLEEP) {
2533 msleep_interruptible(1);
2534 } else {
2535 mdelay(1);
2536 }
2537
2538 if (!cntdn) {
2539 printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n",
2540 ioc->name, (int)((count+5)/HZ));
2541 return -9;
2542 }
2543
2544 state = mpt_GetIocState(ioc, 1);
2545 count++;
2546 }
2547 dhsprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
2548 ioc->name, count));
2549
2550 return r;
2551}
2552
2553/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2554/*
2555 * SendPortEnable - Send PortEnable request to MPT adapter port.
2556 * @ioc: Pointer to MPT_ADAPTER structure
2557 * @portnum: Port number to enable
2558 * @sleepFlag: Specifies whether the process can sleep
2559 *
2560 * Send PortEnable to bring IOC to OPERATIONAL state.
2561 *
2562 * Returns 0 for success, non-zero for failure.
2563 */
2564static int
2565SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
2566{
2567 PortEnable_t port_enable;
2568 MPIDefaultReply_t reply_buf;
2569 int ii;
2570 int req_sz;
2571 int reply_sz;
2572
2573 /* Destination... */
2574 reply_sz = sizeof(MPIDefaultReply_t);
2575 memset(&reply_buf, 0, reply_sz);
2576
2577 req_sz = sizeof(PortEnable_t);
2578 memset(&port_enable, 0, req_sz);
2579
2580 port_enable.Function = MPI_FUNCTION_PORT_ENABLE;
2581 port_enable.PortNumber = portnum;
2582/* port_enable.ChainOffset = 0; */
2583/* port_enable.MsgFlags = 0; */
2584/* port_enable.MsgContext = 0; */
2585
2586 dinitprintk((MYIOC_s_INFO_FMT "Sending Port(%d)Enable (req @ %p)\n",
2587 ioc->name, portnum, &port_enable));
2588
2589 /* RAID FW may take a long time to enable
2590 */
2591 if (ioc->bus_type == FC) {
2592 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2593 reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag);
2594 } else {
2595 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2596 reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag);
2597 }
2598
2599 if (ii != 0)
2600 return ii;
2601
2602 /* We do not even look at the reply, so we need not
2603 * swap the multi-byte fields.
2604 */
2605
2606 return 0;
2607}
2608
2609/*
2610 * mpt_alloc_fw_memory - Allocate DMA-consistent memory to cache the FW image.
2611 * @ioc: Pointer to MPT_ADAPTER structure; @size: total FW bytes
2612 */
2613void
2614mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
2615{
2616 if (ioc->cached_fw)
2617 return; /* use already allocated memory */
2618 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
2619 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
2620 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
2621 } else {
2622 if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) )
2623 ioc->alloc_total += size;
2624 }
2625}
2626/*
2627 * mpt_free_fw_memory - Free the cached firmware image buffer.
2628 * @ioc: Pointer to MPT_ADAPTER structure
2629 */
2630void
2631mpt_free_fw_memory(MPT_ADAPTER *ioc)
2632{
2633 int sz;
2634
2635 sz = ioc->facts.FWImageSize;
2636 dinitprintk((KERN_WARNING MYNAM "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
2637 ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
2638 pci_free_consistent(ioc->pcidev, sz,
2639 ioc->cached_fw, ioc->cached_fw_dma);
2640 ioc->cached_fw = NULL;
2641
2642 return;
2643}
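/*
 * Usage sketch (illustrative): the cache/free pairing as mpt_do_upload()
 * below applies it.  The image buffer is shared with a bound alt_ioc, so it
 * is allocated at most once per physical chip and released only when the
 * upload fails or after downloadboot has consumed it
 * (upload_failed is a placeholder):
 *
 *	mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize);
 *	if (ioc->cached_fw == NULL)
 *		return -ENOMEM;
 *	... FWUpload DMA lands in ioc->cached_fw / ioc->cached_fw_dma ...
 *	if (upload_failed)
 *		mpt_free_fw_memory(ioc);
 */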
2644
2645
2646/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2647/*
2648 * mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
2649 * @ioc: Pointer to MPT_ADAPTER structure
2650 * @sleepFlag: Specifies whether the process can sleep
2651 *
2652 * Returns 0 for success, >0 for handshake failure
2653 * <0 for fw upload failure.
2654 *
2655 *	Remark: If there is a bound IOC and a successful FWUpload was performed
2656 *	on the bound IOC, the second image is discarded
2657 *	and its memory is freed.  Both channels must upload to prevent
2658 * IOC from running in degraded mode.
2659 */
2660static int
2661mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
2662{
2663 u8 request[ioc->req_sz];
2664 u8 reply[sizeof(FWUploadReply_t)];
2665 FWUpload_t *prequest;
2666 FWUploadReply_t *preply;
2667 FWUploadTCSGE_t *ptcsge;
2668 int sgeoffset;
2669 u32 flagsLength;
2670 int ii, sz, reply_sz;
2671 int cmdStatus;
2672
2673 /* If the image size is 0, we are done.
2674 */
2675 if ((sz = ioc->facts.FWImageSize) == 0)
2676 return 0;
2677
2678 mpt_alloc_fw_memory(ioc, sz);
2679
2680 dinitprintk((KERN_WARNING MYNAM ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
2681 ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
2682
2683 if (ioc->cached_fw == NULL) {
2684 /* Major Failure.
2685 */
2686 return -ENOMEM;
2687 }
2688
2689 prequest = (FWUpload_t *)&request;
2690 preply = (FWUploadReply_t *)&reply;
2691
2692 /* Destination... */
2693 memset(prequest, 0, ioc->req_sz);
2694
2695 reply_sz = sizeof(reply);
2696 memset(preply, 0, reply_sz);
2697
2698 prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2699 prequest->Function = MPI_FUNCTION_FW_UPLOAD;
2700
2701 ptcsge = (FWUploadTCSGE_t *) &prequest->SGL;
2702 ptcsge->DetailsLength = 12;
2703 ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2704 ptcsge->ImageSize = cpu_to_le32(sz);
2705
2706 sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t);
2707
2708 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
2709 mpt_add_sge(&request[sgeoffset], flagsLength, ioc->cached_fw_dma);
2710
2711 sgeoffset += sizeof(u32) + sizeof(dma_addr_t);
2712 dinitprintk((KERN_WARNING MYNAM "Sending FW Upload (req @ %p) sgeoffset=%d \n",
2713 prequest, sgeoffset));
2714 DBG_DUMP_FW_REQUEST_FRAME(prequest)
2715
2716 ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest,
2717 reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag);
2718
2719 dinitprintk((KERN_WARNING MYNAM "FW Upload completed rc=%x \n", ii));
2720
2721 cmdStatus = -EFAULT;
2722 if (ii == 0) {
2723 /* Handshake transfer was complete and successful.
2724 * Check the Reply Frame.
2725 */
2726 int status, transfer_sz;
2727 status = le16_to_cpu(preply->IOCStatus);
2728 if (status == MPI_IOCSTATUS_SUCCESS) {
2729 transfer_sz = le32_to_cpu(preply->ActualImageSize);
2730 if (transfer_sz == sz)
2731 cmdStatus = 0;
2732 }
2733 }
2734 dinitprintk((MYIOC_s_INFO_FMT ": do_upload status %d \n",
2735 ioc->name, cmdStatus));
2736
2737
2738 if (cmdStatus) {
2739
2740 ddlprintk((MYIOC_s_INFO_FMT ": fw upload failed, freeing image \n",
2741 ioc->name));
2742 mpt_free_fw_memory(ioc);
2743 }
2744
2745 return cmdStatus;
2746}
2747
2748/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2749/*
2750 *	mpt_downloadboot - Push the cached FW image to the IOC (DownloadBoot)
2751 *	@ioc: Pointer to MPT_ADAPTER structure
2753 * @sleepFlag: Specifies whether the process can sleep
2754 *
2755 * FwDownloadBoot requires Programmed IO access.
2756 *
2757 * Returns 0 for success
2758 * -1 FW Image size is 0
2759 * -2 No valid cached_fw Pointer
2760 * <0 for fw upload failure.
2761 */
2762static int
2763mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2764{
2765 MpiFwHeader_t *pFwHeader;
2766 MpiExtImageHeader_t *pExtImage;
2767 u32 fwSize;
2768 u32 diag0val;
2769 int count;
2770 u32 *ptrFw;
2771 u32 diagRwData;
2772 u32 nextImage;
2773 u32 load_addr;
2774 u32 ioc_state=0;
2775
2776 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x, ioc FW Ptr %p\n",
2777 ioc->name, ioc->facts.FWImageSize, ioc->cached_fw));
2778
2779 if ( ioc->facts.FWImageSize == 0 )
2780 return -1;
2781
2782 if (ioc->cached_fw == NULL)
2783 return -2;
2784
2785 /* prevent a second downloadboot and memory free with alt_ioc */
2786 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
2787 ioc->alt_ioc->cached_fw = NULL;
2788
2789 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2790 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
2791 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
2792 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
2793 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
2794 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
2795
2796 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM));
2797
2798 /* wait 1 msec */
2799 if (sleepFlag == CAN_SLEEP) {
2800 msleep_interruptible(1);
2801 } else {
2802 mdelay (1);
2803 }
2804
2805 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
2806 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
2807
2808 for (count = 0; count < 30; count ++) {
2809 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
2810 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
2811 ddlprintk((MYIOC_s_INFO_FMT "RESET_ADAPTER cleared, count=%d\n",
2812 ioc->name, count));
2813 break;
2814 }
2815 /* wait 1 sec */
2816 if (sleepFlag == CAN_SLEEP) {
2817 msleep_interruptible (1000);
2818 } else {
2819 mdelay (1000);
2820 }
2821 }
2822
2823 if ( count == 30 ) {
2824 ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! Unable to RESET_ADAPTER diag0val=%x\n",
2825 ioc->name, diag0val));
2826 return -3;
2827 }
2828
2829 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2830 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
2831 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
2832 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
2833 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
2834 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
2835
2836 /* Set the DiagRwEn and Disable ARM bits */
2837 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
2838
2839 pFwHeader = (MpiFwHeader_t *) ioc->cached_fw;
2840 fwSize = (pFwHeader->ImageSize + 3)/4;
2841 ptrFw = (u32 *) pFwHeader;
2842
2843 /* Write the LoadStartAddress to the DiagRw Address Register
2844 * using Programmed IO
2845 */
2846 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->LoadStartAddress);
2847 ddlprintk((MYIOC_s_INFO_FMT "LoadStart addr written 0x%x \n",
2848 ioc->name, pFwHeader->LoadStartAddress));
2849
2850 ddlprintk((MYIOC_s_INFO_FMT "Write FW Image: 0x%x bytes @ %p\n",
2851 ioc->name, fwSize*4, ptrFw));
2852 while (fwSize--) {
2853 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
2854 }
2855
2856 nextImage = pFwHeader->NextImageHeaderOffset;
2857 while (nextImage) {
2858 pExtImage = (MpiExtImageHeader_t *) ((char *)pFwHeader + nextImage);
2859
2860 load_addr = pExtImage->LoadStartAddress;
2861
2862 fwSize = (pExtImage->ImageSize + 3) >> 2;
2863 ptrFw = (u32 *)pExtImage;
2864
2865 ddlprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x bytes @ %p load_addr=%x\n",
2866 ioc->name, fwSize*4, ptrFw, load_addr));
2867 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
2868
2869 while (fwSize--) {
2870 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
2871 }
2872 nextImage = pExtImage->NextImageHeaderOffset;
2873 }
2874
2875 /* Write the IopResetVectorRegAddr */
2876 ddlprintk((MYIOC_s_INFO_FMT "Write IopResetVector Addr=%x! \n", ioc->name, pFwHeader->IopResetRegAddr));
2877 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr);
2878
2879 /* Write the IopResetVectorValue */
2880 ddlprintk((MYIOC_s_INFO_FMT "Write IopResetVector Value=%x! \n", ioc->name, pFwHeader->IopResetVectorValue));
2881 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, pFwHeader->IopResetVectorValue);
2882
2883 /* Clear the internal flash bad bit - autoincrementing register,
2884 * so must do two writes.
2885 */
2886 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
2887 diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
2888 diagRwData |= 0x4000000;
2889 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
2890 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
2891
2892 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
2893 ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, turning off PREVENT_IOC_BOOT, DISABLE_ARM\n",
2894 ioc->name, diag0val));
2895 diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM);
2896 ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n",
2897 ioc->name, diag0val));
2898 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
2899
2900 /* Write 0xFF to reset the sequencer */
2901 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2902
2903 for (count=0; count<HZ*20; count++) {
2904 if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
2905 ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n",
2906 ioc->name, count, ioc_state));
2907 if ((SendIocInit(ioc, sleepFlag)) != 0) {
2908 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n",
2909 ioc->name));
2910 return -EFAULT;
2911 }
2912 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit successful\n",
2913 ioc->name));
2914 return 0;
2915 }
2916 if (sleepFlag == CAN_SLEEP) {
2917 msleep_interruptible (10);
2918 } else {
2919 mdelay (10);
2920 }
2921 }
2922 ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! IocState=%x\n",
2923 ioc->name, ioc_state));
2924 return -EFAULT;
2925}
2926
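/*
 *	Illustrative sketch (not part of the driver build): layout of the
 *	cached firmware image that mpt_downloadboot() above walks.  The main
 *	image starts at the MpiFwHeader_t; each optional extended image is
 *	located by following NextImageHeaderOffset (a byte offset from the
 *	start of the main header) until it reads back as zero.
 */
#if 0
static void sketch_walk_fw_image(MpiFwHeader_t *pFwHeader)
{
	MpiExtImageHeader_t	*pExt;
	u32			 next = pFwHeader->NextImageHeaderOffset;

	/* main image: pFwHeader->ImageSize bytes, written via PIO starting
	 * at pFwHeader->LoadStartAddress
	 */
	while (next) {
		pExt = (MpiExtImageHeader_t *)((char *)pFwHeader + next);
		/* extended image: pExt->ImageSize bytes at pExt->LoadStartAddress */
		next = pExt->NextImageHeaderOffset;
	}
}
#endif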
2927/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2928/*
2929 * KickStart - Perform hard reset of MPT adapter.
2930 * @ioc: Pointer to MPT_ADAPTER structure
2931 * @force: Force hard reset
2932 * @sleepFlag: Specifies whether the process can sleep
2933 *
2934  *	This routine places the MPT adapter in diagnostic mode via the
2935  *	WriteSequence register, and then performs a hard reset of the
2936  *	adapter via the Diagnostic register.
2937 *
2938 * Inputs: sleepflag - CAN_SLEEP (non-interrupt thread)
2939 * or NO_SLEEP (interrupt thread, use mdelay)
2940  *		  force - 1 if the doorbell is active, the board is in a
2941  *				fault state or operational, or this is an
2942  *				IOC_RECOVERY or IOC_BRINGUP with an alt_ioc;
2943  *				0 otherwise
2944 *
2945 * Returns:
2946 * 1 - hard reset, READY
2947 * 0 - no reset due to History bit, READY
2948 * -1 - no reset due to History bit but not READY
2949 * OR reset but failed to come READY
2950 * -2 - no reset, could not enter DIAG mode
2951 * -3 - reset but bad FW bit
2952 */
2953static int
2954KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
2955{
2956 int hard_reset_done = 0;
2957 u32 ioc_state=0;
2958 int cnt,cntdn;
2959
2960 dinitprintk((KERN_WARNING MYNAM ": KickStarting %s!\n", ioc->name));
2961 if (ioc->bus_type == SCSI) {
2962 /* Always issue a Msg Unit Reset first. This will clear some
2963 * SCSI bus hang conditions.
2964 */
2965 SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
2966
2967 if (sleepFlag == CAN_SLEEP) {
2968 msleep_interruptible (1000);
2969 } else {
2970 mdelay (1000);
2971 }
2972 }
2973
2974 hard_reset_done = mpt_diag_reset(ioc, force, sleepFlag);
2975 if (hard_reset_done < 0)
2976 return hard_reset_done;
2977
2978 dinitprintk((MYIOC_s_INFO_FMT "Diagnostic reset successful!\n",
2979 ioc->name));
2980
2981 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 2; /* 2 seconds */
2982 for (cnt=0; cnt<cntdn; cnt++) {
2983 ioc_state = mpt_GetIocState(ioc, 1);
2984 if ((ioc_state == MPI_IOC_STATE_READY) || (ioc_state == MPI_IOC_STATE_OPERATIONAL)) {
2985 dinitprintk((MYIOC_s_INFO_FMT "KickStart successful! (cnt=%d)\n",
2986 ioc->name, cnt));
2987 return hard_reset_done;
2988 }
2989 if (sleepFlag == CAN_SLEEP) {
2990 msleep_interruptible (10);
2991 } else {
2992 mdelay (10);
2993 }
2994 }
2995
2996 printk(MYIOC_s_ERR_FMT "Failed to come READY after reset! IocState=%x\n",
2997 ioc->name, ioc_state);
2998 return -1;
2999}
3000
3001/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3002/*
3003 * mpt_diag_reset - Perform hard reset of the adapter.
3004 * @ioc: Pointer to MPT_ADAPTER structure
3005  *	@ignore: Set to ignore the reset history bit and force the
3006  *		reset; clear to honor the reset history bit
3007 * @sleepflag: CAN_SLEEP if called in a non-interrupt thread,
3008 * else set to NO_SLEEP (use mdelay instead)
3009 *
3010 * This routine places the adapter in diagnostic mode via the
3011  *	WriteSequence register and then performs a hard reset of the
3012  *	adapter via the Diagnostic register.  The adapter should be in
3013  *	the ready state upon successful completion.
3014 *
3015 * Returns: 1 hard reset successful
3016 * 0 no reset performed because reset history bit set
3017 * -2 enabling diagnostic mode failed
3018 * -3 diagnostic reset failed
3019 */
3020static int
3021mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3022{
3023 u32 diag0val;
3024 u32 doorbell;
3025 int hard_reset_done = 0;
3026 int count = 0;
3027#ifdef MPT_DEBUG
3028 u32 diag1val = 0;
3029#endif
3030
3031 /* Clear any existing interrupts */
3032 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3033
3034 /* Use "Diagnostic reset" method! (only thing available!) */
3035 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3036
3037#ifdef MPT_DEBUG
3038 if (ioc->alt_ioc)
3039 diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
3040 dprintk((MYIOC_s_INFO_FMT "DbG1: diag0=%08x, diag1=%08x\n",
3041 ioc->name, diag0val, diag1val));
3042#endif
3043
3044 /* Do the reset if we are told to ignore the reset history
3045 * or if the reset history is 0
3046 */
3047 if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) {
3048 while ((diag0val & MPI_DIAG_DRWE) == 0) {
3049 /* Write magic sequence to WriteSequence register
3050 * Loop until in diagnostic mode
3051 */
3052 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
3053 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
3054 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
3055 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
3056 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
3057 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
3058
3059 /* wait 100 msec */
3060 if (sleepFlag == CAN_SLEEP) {
3061 msleep_interruptible (100);
3062 } else {
3063 mdelay (100);
3064 }
3065
3066 count++;
3067 if (count > 20) {
3068 printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
3069 ioc->name, diag0val);
3070 return -2;
3071
3072 }
3073
3074 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3075
3076 dprintk((MYIOC_s_INFO_FMT "Wrote magic DiagWriteEn sequence (%x)\n",
3077 ioc->name, diag0val));
3078 }
3079
3080#ifdef MPT_DEBUG
3081 if (ioc->alt_ioc)
3082 diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
3083 dprintk((MYIOC_s_INFO_FMT "DbG2: diag0=%08x, diag1=%08x\n",
3084 ioc->name, diag0val, diag1val));
3085#endif
3086 /*
3087 * Disable the ARM (Bug fix)
3088 *
3089 */
3090 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
3091 mdelay (1);
3092
3093 /*
3094 * Now hit the reset bit in the Diagnostic register
3095 * (THE BIG HAMMER!) (Clears DRWE bit).
3096 */
3097 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
3098 hard_reset_done = 1;
3099 dprintk((MYIOC_s_INFO_FMT "Diagnostic reset performed\n",
3100 ioc->name));
3101
3102 /*
3103 * Call each currently registered protocol IOC reset handler
3104 * with pre-reset indication.
3105 * NOTE: If we're doing _IOC_BRINGUP, there can be no
3106 * MptResetHandlers[] registered yet.
3107 */
3108 {
3109 int ii;
3110 int r = 0;
3111
3112 for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
3113 if (MptResetHandlers[ii]) {
3114 dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n",
3115 ioc->name, ii));
3116 r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_PRE_RESET);
3117 if (ioc->alt_ioc) {
3118 dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n",
3119 ioc->name, ioc->alt_ioc->name, ii));
3120 r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_PRE_RESET);
3121 }
3122 }
3123 }
3124 /* FIXME? Examine results here? */
3125 }
3126
3127 if (ioc->cached_fw) {
3128 /* If the DownloadBoot operation fails, the
3129 * IOC will be left unusable. This is a fatal error
3130 * case. _diag_reset will return < 0
3131 */
3132 for (count = 0; count < 30; count ++) {
3133 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3134 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
3135 break;
3136 }
3137
3138 /* wait 1 sec */
3139 if (sleepFlag == CAN_SLEEP) {
3140 ssleep(1);
3141 } else {
3142 mdelay (1000);
3143 }
3144 }
3145 if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) {
3146 printk(KERN_WARNING MYNAM
3147 ": firmware downloadboot failure (%d)!\n", count);
3148 }
3149
3150 } else {
3151 /* Wait for FW to reload and for board
3152 * to go to the READY state.
3153 * Maximum wait is 60 seconds.
3154 		 * If this times out, no error is returned here;
3155 		 * the calling program will check the IOC state again.
3156 */
3157 for (count = 0; count < 60; count ++) {
3158 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
3159 doorbell &= MPI_IOC_STATE_MASK;
3160
3161 if (doorbell == MPI_IOC_STATE_READY) {
3162 break;
3163 }
3164
3165 /* wait 1 sec */
3166 if (sleepFlag == CAN_SLEEP) {
3167 msleep_interruptible (1000);
3168 } else {
3169 mdelay (1000);
3170 }
3171 }
3172 }
3173 }
3174
3175 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3176#ifdef MPT_DEBUG
3177 if (ioc->alt_ioc)
3178 diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
3179 dprintk((MYIOC_s_INFO_FMT "DbG3: diag0=%08x, diag1=%08x\n",
3180 ioc->name, diag0val, diag1val));
3181#endif
3182
3183 /* Clear RESET_HISTORY bit! Place board in the
3184 * diagnostic mode to update the diag register.
3185 */
3186 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3187 count = 0;
3188 while ((diag0val & MPI_DIAG_DRWE) == 0) {
3189 /* Write magic sequence to WriteSequence register
3190 * Loop until in diagnostic mode
3191 */
3192 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
3193 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
3194 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
3195 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
3196 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
3197 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
3198
3199 /* wait 100 msec */
3200 if (sleepFlag == CAN_SLEEP) {
3201 msleep_interruptible (100);
3202 } else {
3203 mdelay (100);
3204 }
3205
3206 count++;
3207 if (count > 20) {
3208 printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
3209 ioc->name, diag0val);
3210 break;
3211 }
3212 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3213 }
3214 diag0val &= ~MPI_DIAG_RESET_HISTORY;
3215 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
3216 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3217 if (diag0val & MPI_DIAG_RESET_HISTORY) {
3218 printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n",
3219 ioc->name);
3220 }
3221
3222 /* Disable Diagnostic Mode
3223 */
3224 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF);
3225
3226 /* Check FW reload status flags.
3227 */
3228 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3229 if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) {
3230 printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! (%02xh)\n",
3231 ioc->name, diag0val);
3232 return -3;
3233 }
3234
3235#ifdef MPT_DEBUG
3236 if (ioc->alt_ioc)
3237 diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
3238 dprintk((MYIOC_s_INFO_FMT "DbG4: diag0=%08x, diag1=%08x\n",
3239 ioc->name, diag0val, diag1val));
3240#endif
3241
3242 /*
3243 * Reset flag that says we've enabled event notification
3244 */
3245 ioc->facts.EventState = 0;
3246
3247 if (ioc->alt_ioc)
3248 ioc->alt_ioc->facts.EventState = 0;
3249
3250 return hard_reset_done;
3251}
3252
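/*
 *	Illustrative sketch (not compiled): the Diagnostic register writes in
 *	mpt_diag_reset() and mpt_downloadboot() above are only honored after
 *	the five-key "magic" sequence has been written to WriteSequence.  A
 *	hypothetical helper would look like this; the driver keeps the writes
 *	inline at each call site.
 */
#if 0
static void sketch_enable_diag_writes(MPT_ADAPTER *ioc)
{
	/* 0xFF resets the key sequencer, then the five keys set DRWE */
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
}
#endif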
3253/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3254/*
3255 * SendIocReset - Send IOCReset request to MPT adapter.
3256 * @ioc: Pointer to MPT_ADAPTER structure
3257 * @reset_type: reset type, expected values are
3258 * %MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET
3259 *
3260 * Send IOCReset request to the MPT adapter.
3261 *
3262 * Returns 0 for success, non-zero for failure.
3263 */
3264static int
3265SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
3266{
3267 int r;
3268 u32 state;
3269 int cntdn, count;
3270
3271 drsprintk((KERN_WARNING MYNAM ": %s: Sending IOC reset(0x%02x)!\n",
3272 ioc->name, reset_type));
3273 CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
3274 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
3275 return r;
3276
3277 /* FW ACK'd request, wait for READY state
3278 */
3279 count = 0;
3280 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */
3281
3282 while ((state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
3283 cntdn--;
3284 count++;
3285 if (!cntdn) {
3286 if (sleepFlag != CAN_SLEEP)
3287 count *= 10;
3288
3289 printk(KERN_ERR MYNAM ": %s: ERROR - Wait IOC_READY state timeout(%d)!\n",
3290 ioc->name, (int)((count+5)/HZ));
3291 return -ETIME;
3292 }
3293
3294 if (sleepFlag == CAN_SLEEP) {
3295 msleep_interruptible(1);
3296 } else {
3297 mdelay (1); /* 1 msec delay */
3298 }
3299 }
3300
3301 /* TODO!
3302 * Cleanup all event stuff for this IOC; re-issue EventNotification
3303 * request if needed.
3304 */
3305 if (ioc->facts.Function)
3306 ioc->facts.EventState = 0;
3307
3308 return 0;
3309}
3310
3311/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3312/*
3313 * initChainBuffers - Allocate memory for and initialize
3314  *	initChainBuffers - Allocate memory for and initialize
3315  *	chain buffers and the chain buffer control arrays.
3316  *	@ioc: Pointer to MPT_ADAPTER structure
3318static int
3319initChainBuffers(MPT_ADAPTER *ioc)
3320{
3321 u8 *mem;
3322 int sz, ii, num_chain;
3323 int scale, num_sge, numSGE;
3324
3325 /* ReqToChain size must equal the req_depth
3326 * index = req_idx
3327 */
3328 if (ioc->ReqToChain == NULL) {
3329 sz = ioc->req_depth * sizeof(int);
3330 mem = kmalloc(sz, GFP_ATOMIC);
3331 if (mem == NULL)
3332 return -1;
3333
3334 ioc->ReqToChain = (int *) mem;
3335 dinitprintk((KERN_INFO MYNAM ": %s ReqToChain alloc @ %p, sz=%d bytes\n",
3336 ioc->name, mem, sz));
3337 mem = kmalloc(sz, GFP_ATOMIC);
3338 if (mem == NULL)
3339 return -1;
3340
3341 ioc->RequestNB = (int *) mem;
3342 dinitprintk((KERN_INFO MYNAM ": %s RequestNB alloc @ %p, sz=%d bytes\n",
3343 ioc->name, mem, sz));
3344 }
3345 for (ii = 0; ii < ioc->req_depth; ii++) {
3346 ioc->ReqToChain[ii] = MPT_HOST_NO_CHAIN;
3347 }
3348
3349 /* ChainToChain size must equal the total number
3350 * of chain buffers to be allocated.
3351 * index = chain_idx
3352 *
3353 	 * Calculate the number of chain buffers needed (plus 1) per I/O,
3354 	 * then multiply by the maximum number of simultaneous commands.
3355 *
3356 * num_sge = num sge in request frame + last chain buffer
3357 * scale = num sge per chain buffer if no chain element
3358 */
3359 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
3360 if (sizeof(dma_addr_t) == sizeof(u64))
3361 num_sge = scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32));
3362 else
3363 num_sge = 1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32));
3364
3365 if (sizeof(dma_addr_t) == sizeof(u64)) {
3366 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
3367 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32));
3368 } else {
3369 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
3370 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32));
3371 }
3372 dinitprintk((KERN_INFO MYNAM ": %s num_sge=%d numSGE=%d\n",
3373 ioc->name, num_sge, numSGE));
3374
3375 if ( numSGE > MPT_SCSI_SG_DEPTH )
3376 numSGE = MPT_SCSI_SG_DEPTH;
3377
3378 num_chain = 1;
3379 while (numSGE - num_sge > 0) {
3380 num_chain++;
3381 num_sge += (scale - 1);
3382 }
3383 num_chain++;
3384
3385 dinitprintk((KERN_INFO MYNAM ": %s Now numSGE=%d num_sge=%d num_chain=%d\n",
3386 ioc->name, numSGE, num_sge, num_chain));
3387
3388 if (ioc->bus_type == SCSI)
3389 num_chain *= MPT_SCSI_CAN_QUEUE;
3390 else
3391 num_chain *= MPT_FC_CAN_QUEUE;
3392
3393 ioc->num_chain = num_chain;
3394
3395 sz = num_chain * sizeof(int);
3396 if (ioc->ChainToChain == NULL) {
3397 mem = kmalloc(sz, GFP_ATOMIC);
3398 if (mem == NULL)
3399 return -1;
3400
3401 ioc->ChainToChain = (int *) mem;
3402 dinitprintk((KERN_INFO MYNAM ": %s ChainToChain alloc @ %p, sz=%d bytes\n",
3403 ioc->name, mem, sz));
3404 } else {
3405 mem = (u8 *) ioc->ChainToChain;
3406 }
3407 memset(mem, 0xFF, sz);
3408 return num_chain;
3409}
3410
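/*
 *	Worked example for the arithmetic in initChainBuffers() above, using
 *	hypothetical values (these are assumptions for illustration only):
 *	with req_sz = 128 and a 64-bit dma_addr_t, one simple SGE occupies
 *	12 bytes, so scale = 128/12 = 10 and num_sge = 10 + (128-60)/12 = 15.
 *	Assuming MaxChainDepth = 8 and MPT_SCSI_SG_DEPTH = 40, numSGE is
 *	(10-1)*(8-1) + 10 + 5 = 78, which is then capped at 40; the loop then
 *	grows num_sge by (scale-1) = 9 per chain until it covers 40, giving
 *	num_chain = 5 per command before the *_CAN_QUEUE multiply.
 */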
3411/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3412/*
3413 * PrimeIocFifos - Initialize IOC request and reply FIFOs.
3414 * @ioc: Pointer to MPT_ADAPTER structure
3415 *
3416 * This routine allocates memory for the MPT reply and request frame
3417 * pools (if necessary), and primes the IOC reply FIFO with
3418 * reply frames.
3419 *
3420 * Returns 0 for success, non-zero for failure.
3421 */
3422static int
3423PrimeIocFifos(MPT_ADAPTER *ioc)
3424{
3425 MPT_FRAME_HDR *mf;
3426 unsigned long flags;
3427 dma_addr_t alloc_dma;
3428 u8 *mem;
3429 int i, reply_sz, sz, total_size, num_chain;
3430
3431 /* Prime reply FIFO... */
3432
3433 if (ioc->reply_frames == NULL) {
3434 if ( (num_chain = initChainBuffers(ioc)) < 0)
3435 return -1;
3436
3437 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
3438 dinitprintk((KERN_INFO MYNAM ": %s.ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
3439 ioc->name, ioc->reply_sz, ioc->reply_depth));
3440 dinitprintk((KERN_INFO MYNAM ": %s.ReplyBuffer sz=%d[%x] bytes\n",
3441 ioc->name, reply_sz, reply_sz));
3442
3443 sz = (ioc->req_sz * ioc->req_depth);
3444 dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffer sz=%d bytes, RequestDepth=%d\n",
3445 ioc->name, ioc->req_sz, ioc->req_depth));
3446 dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffer sz=%d[%x] bytes\n",
3447 ioc->name, sz, sz));
3448 total_size += sz;
3449
3450 sz = num_chain * ioc->req_sz; /* chain buffer pool size */
3451 dinitprintk((KERN_INFO MYNAM ": %s.ChainBuffer sz=%d bytes, ChainDepth=%d\n",
3452 ioc->name, ioc->req_sz, num_chain));
3453 dinitprintk((KERN_INFO MYNAM ": %s.ChainBuffer sz=%d[%x] bytes num_chain=%d\n",
3454 ioc->name, sz, sz, num_chain));
3455
3456 total_size += sz;
3457 mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
3458 if (mem == NULL) {
3459 printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
3460 ioc->name);
3461 goto out_fail;
3462 }
3463
3464 dinitprintk((KERN_INFO MYNAM ": %s.Total alloc @ %p[%p], sz=%d[%x] bytes\n",
3465 ioc->name, mem, (void *)(ulong)alloc_dma, total_size, total_size));
3466
3467 memset(mem, 0, total_size);
3468 ioc->alloc_total += total_size;
3469 ioc->alloc = mem;
3470 ioc->alloc_dma = alloc_dma;
3471 ioc->alloc_sz = total_size;
3472 ioc->reply_frames = (MPT_FRAME_HDR *) mem;
3473 ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
3474
3475 alloc_dma += reply_sz;
3476 mem += reply_sz;
3477
3478 /* Request FIFO - WE manage this! */
3479
3480 ioc->req_frames = (MPT_FRAME_HDR *) mem;
3481 ioc->req_frames_dma = alloc_dma;
3482
3483 dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffers @ %p[%p]\n",
3484 ioc->name, mem, (void *)(ulong)alloc_dma));
3485
3486 ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
3487
3488#if defined(CONFIG_MTRR) && 0
3489 /*
3490 * Enable Write Combining MTRR for IOC's memory region.
3491 * (at least as much as we can; "size and base must be
3492 		 * multiples of 4 kiB")
3493 */
3494 ioc->mtrr_reg = mtrr_add(ioc->req_frames_dma,
3495 sz,
3496 MTRR_TYPE_WRCOMB, 1);
3497 dprintk((MYIOC_s_INFO_FMT "MTRR region registered (base:size=%08x:%x)\n",
3498 ioc->name, ioc->req_frames_dma, sz));
3499#endif
3500
3501 for (i = 0; i < ioc->req_depth; i++) {
3502 alloc_dma += ioc->req_sz;
3503 mem += ioc->req_sz;
3504 }
3505
3506 ioc->ChainBuffer = mem;
3507 ioc->ChainBufferDMA = alloc_dma;
3508
3509 dinitprintk((KERN_INFO MYNAM " :%s.ChainBuffers @ %p(%p)\n",
3510 ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
3511
3512 /* Initialize the free chain Q.
3513 */
3514
3515 INIT_LIST_HEAD(&ioc->FreeChainQ);
3516
3517 /* Post the chain buffers to the FreeChainQ.
3518 */
3519 mem = (u8 *)ioc->ChainBuffer;
3520 for (i=0; i < num_chain; i++) {
3521 mf = (MPT_FRAME_HDR *) mem;
3522 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeChainQ);
3523 mem += ioc->req_sz;
3524 }
3525
3526 /* Initialize Request frames linked list
3527 */
3528 alloc_dma = ioc->req_frames_dma;
3529 mem = (u8 *) ioc->req_frames;
3530
3531 spin_lock_irqsave(&ioc->FreeQlock, flags);
3532 INIT_LIST_HEAD(&ioc->FreeQ);
3533 for (i = 0; i < ioc->req_depth; i++) {
3534 mf = (MPT_FRAME_HDR *) mem;
3535
3536 /* Queue REQUESTs *internally*! */
3537 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
3538
3539 mem += ioc->req_sz;
3540 }
3541 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
3542
3543 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
3544 ioc->sense_buf_pool =
3545 pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
3546 if (ioc->sense_buf_pool == NULL) {
3547 printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
3548 ioc->name);
3549 goto out_fail;
3550 }
3551
3552 ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF);
3553 ioc->alloc_total += sz;
3554 dinitprintk((KERN_INFO MYNAM ": %s.SenseBuffers @ %p[%p]\n",
3555 ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma));
3556
3557 }
3558
3559 /* Post Reply frames to FIFO
3560 */
3561 alloc_dma = ioc->alloc_dma;
3562 dinitprintk((KERN_INFO MYNAM ": %s.ReplyBuffers @ %p[%p]\n",
3563 ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
3564
3565 for (i = 0; i < ioc->reply_depth; i++) {
3566 /* Write each address to the IOC! */
3567 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, alloc_dma);
3568 alloc_dma += ioc->reply_sz;
3569 }
3570
3571 return 0;
3572
3573out_fail:
3574 if (ioc->alloc != NULL) {
3575 sz = ioc->alloc_sz;
3576 pci_free_consistent(ioc->pcidev,
3577 sz,
3578 ioc->alloc, ioc->alloc_dma);
3579 ioc->reply_frames = NULL;
3580 ioc->req_frames = NULL;
3581 ioc->alloc_total -= sz;
3582 }
3583 if (ioc->sense_buf_pool != NULL) {
3584 sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
3585 pci_free_consistent(ioc->pcidev,
3586 sz,
3587 ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
3588 ioc->sense_buf_pool = NULL;
3589 }
3590 return -1;
3591}
3592
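/*
 *	Sketch of the DMA layout built by PrimeIocFifos() above (illustrative;
 *	the sizes depend on the negotiated IOCFacts values).  One coherent
 *	allocation at ioc->alloc / ioc->alloc_dma is carved into three
 *	consecutive regions, in this order:
 *
 *	    +---------------------+  reply frames   (reply_sz * reply_depth)
 *	    +---------------------+  request frames (req_sz   * req_depth)
 *	    +---------------------+  chain buffers  (req_sz   * num_chain)
 *
 *	The sense buffer pool is a separate pci_alloc_consistent() region of
 *	req_depth * MPT_SENSE_BUFFER_ALLOC bytes.
 */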
3593/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3594/**
3595 * mpt_handshake_req_reply_wait - Send MPT request to and receive reply
3596 * from IOC via doorbell handshake method.
3597 * @ioc: Pointer to MPT_ADAPTER structure
3598 * @reqBytes: Size of the request in bytes
3599 * @req: Pointer to MPT request frame
3600 * @replyBytes: Expected size of the reply in bytes
3601 * @u16reply: Pointer to area where reply should be written
3602 * @maxwait: Max wait time for a reply (in seconds)
3603 * @sleepFlag: Specifies whether the process can sleep
3604 *
3605  *	NOTES: It is the caller's responsibility to byte-swap fields in the
3606  *	request which are greater than 1 byte in size.  It is also the
3607  *	caller's responsibility to byte-swap response fields which are
3608 * greater than 1 byte in size.
3609 *
3610 * Returns 0 for success, non-zero for failure.
3611 */
3612static int
3613mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
3614 int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
3615{
3616 MPIDefaultReply_t *mptReply;
3617 int failcnt = 0;
3618 int t;
3619
3620 /*
3621 * Get ready to cache a handshake reply
3622 */
3623 ioc->hs_reply_idx = 0;
3624 mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
3625 mptReply->MsgLength = 0;
3626
3627 /*
3628 * Make sure there are no doorbells (WRITE 0 to IntStatus reg),
3629 * then tell IOC that we want to handshake a request of N words.
3630 * (WRITE u32val to Doorbell reg).
3631 */
3632 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3633 CHIPREG_WRITE32(&ioc->chip->Doorbell,
3634 ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
3635 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
3636
3637 /*
3638 * Wait for IOC's doorbell handshake int
3639 */
3640 if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
3641 failcnt++;
3642
3643 dhsprintk((MYIOC_s_INFO_FMT "HandShake request start reqBytes=%d, WaitCnt=%d%s\n",
3644 ioc->name, reqBytes, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
3645
3646 /* Read doorbell and check for active bit */
3647 if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
3648 return -1;
3649
3650 /*
3651 * Clear doorbell int (WRITE 0 to IntStatus reg),
3652 * then wait for IOC to ACKnowledge that it's ready for
3653 * our handshake request.
3654 */
3655 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3656 if (!failcnt && (t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
3657 failcnt++;
3658
3659 if (!failcnt) {
3660 int ii;
3661 u8 *req_as_bytes = (u8 *) req;
3662
3663 /*
3664 * Stuff request words via doorbell handshake,
3665 * with ACK from IOC for each.
3666 */
3667 for (ii = 0; !failcnt && ii < reqBytes/4; ii++) {
3668 u32 word = ((req_as_bytes[(ii*4) + 0] << 0) |
3669 (req_as_bytes[(ii*4) + 1] << 8) |
3670 (req_as_bytes[(ii*4) + 2] << 16) |
3671 (req_as_bytes[(ii*4) + 3] << 24));
3672
3673 CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
3674 if ((t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
3675 failcnt++;
3676 }
3677
3678 dhsprintk((KERN_INFO MYNAM ": Handshake request frame (@%p) header\n", req));
3679 DBG_DUMP_REQUEST_FRAME_HDR(req)
3680
3681 dhsprintk((MYIOC_s_INFO_FMT "HandShake request post done, WaitCnt=%d%s\n",
3682 ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : ""));
3683
3684 /*
3685 * Wait for completion of doorbell handshake reply from the IOC
3686 */
3687 if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
3688 failcnt++;
3689
3690 dhsprintk((MYIOC_s_INFO_FMT "HandShake reply count=%d%s\n",
3691 ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
3692
3693 /*
3694 * Copy out the cached reply...
3695 */
3696 for (ii=0; ii < min(replyBytes/2,mptReply->MsgLength*2); ii++)
3697 u16reply[ii] = ioc->hs_reply[ii];
3698 } else {
3699 return -99;
3700 }
3701
3702 return -failcnt;
3703}
3704
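/*
 *	Illustrative sketch (not compiled): the u32 written to the Doorbell
 *	register to start a handshake in mpt_handshake_req_reply_wait() packs
 *	the handshake function code with the request length in dwords.  For
 *	a hypothetical 16-byte (4-dword) request:
 */
#if 0
	u32 start = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
		    ((16 / 4) << MPI_DOORBELL_ADD_DWORDS_SHIFT);

	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);	/* clear pending doorbell */
	CHIPREG_WRITE32(&ioc->chip->Doorbell, start);
#endif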
3705/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3706/*
3707  *	WaitForDoorbellAck - Wait for IOC to clear the MPI_HIS_IOP_DOORBELL_STATUS
3708  *	bit in its IntStatus register.
3709 * @ioc: Pointer to MPT_ADAPTER structure
3710 * @howlong: How long to wait (in seconds)
3711 * @sleepFlag: Specifies whether the process can sleep
3712 *
3713  *	This routine waits (up to approximately @howlong seconds) for
3714  *	the IOC doorbell handshake ACKnowledge.
3715 *
3716 * Returns a negative value on failure, else wait loop count.
3717 */
3718static int
3719WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3720{
3721 int cntdn;
3722 int count = 0;
3723 u32 intstat=0;
3724
3725 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong;
3726
3727 if (sleepFlag == CAN_SLEEP) {
3728 while (--cntdn) {
3729 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3730 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
3731 break;
3732 msleep_interruptible (1);
3733 count++;
3734 }
3735 } else {
3736 while (--cntdn) {
3737 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3738 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
3739 break;
3740 mdelay (1);
3741 count++;
3742 }
3743 }
3744
3745 if (cntdn) {
3746 dprintk((MYIOC_s_INFO_FMT "WaitForDoorbell ACK (count=%d)\n",
3747 ioc->name, count));
3748 return count;
3749 }
3750
3751 printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout (count=%d), IntStatus=%x!\n",
3752 ioc->name, count, intstat);
3753 return -1;
3754}
3755
3756/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3757/*
3758  *	WaitForDoorbellInt - Wait for IOC to set the MPI_HIS_DOORBELL_INTERRUPT
3759  *	bit in its IntStatus register.
3760 * @ioc: Pointer to MPT_ADAPTER structure
3761 * @howlong: How long to wait (in seconds)
3762 * @sleepFlag: Specifies whether the process can sleep
3763 *
3764  *	This routine waits (up to approximately @howlong seconds) for the IOC doorbell interrupt.
3765 *
3766 * Returns a negative value on failure, else wait loop count.
3767 */
3768static int
3769WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3770{
3771 int cntdn;
3772 int count = 0;
3773 u32 intstat=0;
3774
3775 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong;
3776 if (sleepFlag == CAN_SLEEP) {
3777 while (--cntdn) {
3778 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3779 if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
3780 break;
3781 msleep_interruptible(1);
3782 count++;
3783 }
3784 } else {
3785 while (--cntdn) {
3786 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
3787 if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
3788 break;
3789 mdelay(1);
3790 count++;
3791 }
3792 }
3793
3794 if (cntdn) {
3795 dprintk((MYIOC_s_INFO_FMT "WaitForDoorbell INT (cnt=%d) howlong=%d\n",
3796 ioc->name, count, howlong));
3797 return count;
3798 }
3799
3800 printk(MYIOC_s_ERR_FMT "Doorbell INT timeout (count=%d), IntStatus=%x!\n",
3801 ioc->name, count, intstat);
3802 return -1;
3803}
3804
3805/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3806/*
3807  *	WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
3808 * @ioc: Pointer to MPT_ADAPTER structure
3809 * @howlong: How long to wait (in seconds)
3810 * @sleepFlag: Specifies whether the process can sleep
3811 *
3812 * This routine polls the IOC for a handshake reply, 16 bits at a time.
3813  *	The reply is cached in the adapter's private hs_reply area, which is
3814  *	large enough to hold a maximum of 128 bytes of reply data.
3815 *
3816 * Returns a negative value on failure, else size of reply in WORDS.
3817 */
3818static int
3819WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3820{
3821 int u16cnt = 0;
3822 int failcnt = 0;
3823 int t;
3824 u16 *hs_reply = ioc->hs_reply;
3825 volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
3826 u16 hword;
3827
3828 hs_reply[0] = hs_reply[1] = hs_reply[7] = 0;
3829
3830 /*
3831 * Get first two u16's so we can look at IOC's intended reply MsgLength
3832 */
3833 u16cnt=0;
3834 if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) {
3835 failcnt++;
3836 } else {
3837 hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
3838 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3839 if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
3840 failcnt++;
3841 else {
3842 hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
3843 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3844 }
3845 }
3846
3847 dhsprintk((MYIOC_s_INFO_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
3848 ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
3849 failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
3850
3851 /*
3852 * If no error (and IOC said MsgLength is > 0), piece together
3853 * reply 16 bits at a time.
3854 */
3855 for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) {
3856 if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
3857 failcnt++;
3858 hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
3859 /* don't overflow our IOC hs_reply[] buffer! */
3860 if (u16cnt < sizeof(ioc->hs_reply) / sizeof(ioc->hs_reply[0]))
3861 hs_reply[u16cnt] = hword;
3862 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3863 }
3864
3865 if (!failcnt && (t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
3866 failcnt++;
3867 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3868
3869 if (failcnt) {
3870 printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n",
3871 ioc->name);
3872 return -failcnt;
3873 }
3874#if 0
3875 else if (u16cnt != (2 * mptReply->MsgLength)) {
3876 return -101;
3877 }
3878 else if ((mptReply->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3879 return -102;
3880 }
3881#endif
3882
3883 dhsprintk((MYIOC_s_INFO_FMT "Got Handshake reply:\n", ioc->name));
3884 DBG_DUMP_REPLY_FRAME(mptReply)
3885
3886 dhsprintk((MYIOC_s_INFO_FMT "WaitForDoorbell REPLY WaitCnt=%d (sz=%d)\n",
3887 ioc->name, t, u16cnt/2));
3888 return u16cnt/2;
3889}
3890
3891/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3892/*
3893 * GetLanConfigPages - Fetch LANConfig pages.
3894 * @ioc: Pointer to MPT_ADAPTER structure
3895 *
3896 * Return: 0 for success
3897 * -ENOMEM if no memory available
3898 * -EPERM if not allowed due to ISR context
3899 * -EAGAIN if no msg frames currently available
3900 * -EFAULT for non-successful reply or no reply (timeout)
3901 */
3902static int
3903GetLanConfigPages(MPT_ADAPTER *ioc)
3904{
3905 ConfigPageHeader_t hdr;
3906 CONFIGPARMS cfg;
3907 LANPage0_t *ppage0_alloc;
3908 dma_addr_t page0_dma;
3909 LANPage1_t *ppage1_alloc;
3910 dma_addr_t page1_dma;
3911 int rc = 0;
3912 int data_sz;
3913 int copy_sz;
3914
3915 /* Get LAN Page 0 header */
3916 hdr.PageVersion = 0;
3917 hdr.PageLength = 0;
3918 hdr.PageNumber = 0;
3919 hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
3920 cfg.hdr = &hdr;
3921 cfg.physAddr = -1;
3922 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
3923 cfg.dir = 0;
3924 cfg.pageAddr = 0;
3925 cfg.timeout = 0;
3926
3927 if ((rc = mpt_config(ioc, &cfg)) != 0)
3928 return rc;
3929
3930 if (hdr.PageLength > 0) {
3931 data_sz = hdr.PageLength * 4;
3932 ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
3933 rc = -ENOMEM;
3934 if (ppage0_alloc) {
3935 memset((u8 *)ppage0_alloc, 0, data_sz);
3936 cfg.physAddr = page0_dma;
3937 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
3938
3939 if ((rc = mpt_config(ioc, &cfg)) == 0) {
3940 /* save the data */
3941 copy_sz = min_t(int, sizeof(LANPage0_t), data_sz);
3942 memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz);
3943
3944 }
3945
3946 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
3947
3948 /* FIXME!
3949 * Normalize endianness of structure data,
3950 * by byte-swapping all > 1 byte fields!
3951 */
3952
3953 }
3954
3955 if (rc)
3956 return rc;
3957 }
3958
3959 /* Get LAN Page 1 header */
3960 hdr.PageVersion = 0;
3961 hdr.PageLength = 0;
3962 hdr.PageNumber = 1;
3963 hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
3964 cfg.hdr = &hdr;
3965 cfg.physAddr = -1;
3966 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
3967 cfg.dir = 0;
3968 cfg.pageAddr = 0;
3969
3970 if ((rc = mpt_config(ioc, &cfg)) != 0)
3971 return rc;
3972
3973 if (hdr.PageLength == 0)
3974 return 0;
3975
3976 data_sz = hdr.PageLength * 4;
3977 rc = -ENOMEM;
3978 ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
3979 if (ppage1_alloc) {
3980 memset((u8 *)ppage1_alloc, 0, data_sz);
3981 cfg.physAddr = page1_dma;
3982 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
3983
3984 if ((rc = mpt_config(ioc, &cfg)) == 0) {
3985 /* save the data */
3986 copy_sz = min_t(int, sizeof(LANPage1_t), data_sz);
3987 memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
3988 }
3989
3990 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma);
3991
3992 /* FIXME!
3993 * Normalize endianness of structure data,
3994 * by byte-swapping all > 1 byte fields!
3995 */
3996
3997 }
3998
3999 return rc;
4000}
4001
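/*
 *	Sketch (not part of the driver) of the two-step config page pattern
 *	used by GetLanConfigPages() above and by the page readers that
 *	follow: first issue a PAGE_HEADER action to learn PageLength, then
 *	allocate a DMA buffer of PageLength * 4 bytes and issue
 *	PAGE_READ_CURRENT into it.  "pagetype" and "pagenum" below are
 *	placeholders, not real variables.
 */
#if 0
	ConfigPageHeader_t hdr = { .PageNumber = pagenum, .PageType = pagetype };
	CONFIGPARMS cfg = { .hdr = &hdr, .physAddr = -1, .dir = 0,
			    .action = MPI_CONFIG_ACTION_PAGE_HEADER };
	dma_addr_t dma;
	void *buf;

	if (mpt_config(ioc, &cfg) == 0 && hdr.PageLength) {
		int sz = hdr.PageLength * 4;

		buf = pci_alloc_consistent(ioc->pcidev, sz, &dma);
		if (buf) {
			cfg.physAddr = dma;
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
			mpt_config(ioc, &cfg);	/* buf now holds the current page */
			pci_free_consistent(ioc->pcidev, sz, buf, dma);
		}
	}
#endif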
4002/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4003/*
4004 * GetFcPortPage0 - Fetch FCPort config Page0.
4005 * @ioc: Pointer to MPT_ADAPTER structure
4006 * @portnum: IOC Port number
4007 *
4008 * Return: 0 for success
4009 * -ENOMEM if no memory available
4010 * -EPERM if not allowed due to ISR context
4011 * -EAGAIN if no msg frames currently available
4012 * -EFAULT for non-successful reply or no reply (timeout)
4013 */
4014static int
4015GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
4016{
4017 ConfigPageHeader_t hdr;
4018 CONFIGPARMS cfg;
4019 FCPortPage0_t *ppage0_alloc;
4020 FCPortPage0_t *pp0dest;
4021 dma_addr_t page0_dma;
4022 int data_sz;
4023 int copy_sz;
4024 int rc;
4025
4026 /* Get FCPort Page 0 header */
4027 hdr.PageVersion = 0;
4028 hdr.PageLength = 0;
4029 hdr.PageNumber = 0;
4030 hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
4031 cfg.hdr = &hdr;
4032 cfg.physAddr = -1;
4033 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4034 cfg.dir = 0;
4035 cfg.pageAddr = portnum;
4036 cfg.timeout = 0;
4037
4038 if ((rc = mpt_config(ioc, &cfg)) != 0)
4039 return rc;
4040
4041 if (hdr.PageLength == 0)
4042 return 0;
4043
4044 data_sz = hdr.PageLength * 4;
4045 rc = -ENOMEM;
4046 ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
4047 if (ppage0_alloc) {
4048 memset((u8 *)ppage0_alloc, 0, data_sz);
4049 cfg.physAddr = page0_dma;
4050 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4051
4052 if ((rc = mpt_config(ioc, &cfg)) == 0) {
4053 /* save the data */
4054 pp0dest = &ioc->fc_port_page0[portnum];
4055 copy_sz = min_t(int, sizeof(FCPortPage0_t), data_sz);
4056 memcpy(pp0dest, ppage0_alloc, copy_sz);
4057
4058 /*
4059 * Normalize endianness of structure data,
4060 * by byte-swapping all > 1 byte fields!
4061 */
4062 pp0dest->Flags = le32_to_cpu(pp0dest->Flags);
4063 pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier);
4064 pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low);
4065 pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High);
4066 pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low);
4067 pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High);
4068 pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass);
4069 pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds);
4070 pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed);
4071 pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize);
4072 pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low);
4073 pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High);
4074 pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low);
4075 pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High);
4076 pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount);
4077 pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators);
4078
4079 }
4080
4081 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
4082 }
4083
4084 return rc;
4085}
4086
4087/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4088/*
4089 * GetIoUnitPage2 - Retrieve BIOS version and boot order information.
4090 * @ioc: Pointer to MPT_ADAPTER structure
4091 *
4092 * Returns: 0 for success
4093 * -ENOMEM if no memory available
4094 * -EPERM if not allowed due to ISR context
4095 * -EAGAIN if no msg frames currently available
4096 * -EFAULT for non-successful reply or no reply (timeout)
4097 */
4098static int
4099GetIoUnitPage2(MPT_ADAPTER *ioc)
4100{
4101 ConfigPageHeader_t hdr;
4102 CONFIGPARMS cfg;
4103 IOUnitPage2_t *ppage_alloc;
4104 dma_addr_t page_dma;
4105 int data_sz;
4106 int rc;
4107
4108 /* Get the page header */
4109 hdr.PageVersion = 0;
4110 hdr.PageLength = 0;
4111 hdr.PageNumber = 2;
4112 hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
4113 cfg.hdr = &hdr;
4114 cfg.physAddr = -1;
4115 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4116 cfg.dir = 0;
4117 cfg.pageAddr = 0;
4118 cfg.timeout = 0;
4119
4120 if ((rc = mpt_config(ioc, &cfg)) != 0)
4121 return rc;
4122
4123 if (hdr.PageLength == 0)
4124 return 0;
4125
4126 /* Read the config page */
4127 data_sz = hdr.PageLength * 4;
4128 rc = -ENOMEM;
4129 ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
4130 if (ppage_alloc) {
4131 memset((u8 *)ppage_alloc, 0, data_sz);
4132 cfg.physAddr = page_dma;
4133 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4134
4135 /* If Good, save data */
4136 if ((rc = mpt_config(ioc, &cfg)) == 0)
4137 ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion);
4138
4139 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma);
4140 }
4141
4142 return rc;
4143}
4144
4145/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4146 /* mpt_GetScsiPortSettings - read SCSI Port Pages 0 and 2
4147  *	@ioc: Pointer to MPT_ADAPTER structure
4148 * @portnum: IOC port number
4149 *
4150 * Return: -EFAULT if read of config page header fails
4151 * or if no nvram
4152 * If read of SCSI Port Page 0 fails,
4153 * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
4154 * Adapter settings: async, narrow
4155 * Return 1
4156 * If read of SCSI Port Page 2 fails,
4157 * Adapter settings valid
4158 * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
4159 * Return 1
4160 * Else
4161 * Both valid
4162 * Return 0
4163 * CHECK - what type of locking mechanisms should be used????
4164 */
4165static int
4166mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
4167{
4168 u8 *pbuf;
4169 dma_addr_t buf_dma;
4170 CONFIGPARMS cfg;
4171 ConfigPageHeader_t header;
4172 int ii;
4173 int data, rc = 0;
4174
4175 /* Allocate memory
4176 */
4177 if (!ioc->spi_data.nvram) {
4178 int sz;
4179 u8 *mem;
4180 sz = MPT_MAX_SCSI_DEVICES * sizeof(int);
4181 mem = kmalloc(sz, GFP_ATOMIC);
4182 if (mem == NULL)
4183 return -EFAULT;
4184
4185 ioc->spi_data.nvram = (int *) mem;
4186
4187 dprintk((MYIOC_s_INFO_FMT "SCSI device NVRAM settings @ %p, sz=%d\n",
4188 ioc->name, ioc->spi_data.nvram, sz));
4189 }
4190
4191 /* Invalidate NVRAM information
4192 */
4193 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4194 ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID;
4195 }
4196
4197 /* Read SPP0 header, allocate memory, then read page.
4198 */
4199 header.PageVersion = 0;
4200 header.PageLength = 0;
4201 header.PageNumber = 0;
4202 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
4203 cfg.hdr = &header;
4204 cfg.physAddr = -1;
4205 cfg.pageAddr = portnum;
4206 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4207 cfg.dir = 0;
4208 cfg.timeout = 0; /* use default */
4209 if (mpt_config(ioc, &cfg) != 0)
4210 return -EFAULT;
4211
4212 if (header.PageLength > 0) {
4213 pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
4214 if (pbuf) {
4215 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4216 cfg.physAddr = buf_dma;
4217 if (mpt_config(ioc, &cfg) != 0) {
4218 ioc->spi_data.maxBusWidth = MPT_NARROW;
4219 ioc->spi_data.maxSyncOffset = 0;
4220 ioc->spi_data.minSyncFactor = MPT_ASYNC;
4221 ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
4222 rc = 1;
4223 } else {
4224 /* Save the Port Page 0 data
4225 */
4226 SCSIPortPage0_t *pPP0 = (SCSIPortPage0_t *) pbuf;
4227 pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities);
4228 pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface);
4229
4230 if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
4231 ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
4232 dinitprintk((KERN_INFO MYNAM " :%s noQas due to Capabilities=%x\n",
4233 ioc->name, pPP0->Capabilities));
4234 }
4235 ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
4236 data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK;
4237 if (data) {
4238 ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
4239 data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
4240 ioc->spi_data.minSyncFactor = (u8) (data >> 8);
4241 } else {
4242 ioc->spi_data.maxSyncOffset = 0;
4243 ioc->spi_data.minSyncFactor = MPT_ASYNC;
4244 }
4245
4246 ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK;
4247
4248 /* Update the minSyncFactor based on bus type.
4249 */
4250 if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
4251 (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) {
4252
4253 if (ioc->spi_data.minSyncFactor < MPT_ULTRA)
4254 ioc->spi_data.minSyncFactor = MPT_ULTRA;
4255 }
4256 }
4257 if (pbuf) {
4258 pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
4259 }
4260 }
4261 }
4262
4263 /* SCSI Port Page 2 - Read the header then the page.
4264 */
4265 header.PageVersion = 0;
4266 header.PageLength = 0;
4267 header.PageNumber = 2;
4268 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
4269 cfg.hdr = &header;
4270 cfg.physAddr = -1;
4271 cfg.pageAddr = portnum;
4272 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4273 cfg.dir = 0;
4274 if (mpt_config(ioc, &cfg) != 0)
4275 return -EFAULT;
4276
4277 if (header.PageLength > 0) {
4278 /* Allocate memory and read SCSI Port Page 2
4279 */
4280 pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
4281 if (pbuf) {
4282 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
4283 cfg.physAddr = buf_dma;
4284 if (mpt_config(ioc, &cfg) != 0) {
4285 /* Nvram data is left with INVALID mark
4286 */
4287 rc = 1;
4288 } else {
4289 SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t *) pbuf;
4290 MpiDeviceInfo_t *pdevice = NULL;
4291
4292 /* Save the Port Page 2 data
4293 * (reformat into a 32bit quantity)
4294 */
4295 data = le32_to_cpu(pPP2->PortFlags) & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
4296 ioc->spi_data.PortFlags = data;
4297 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4298 pdevice = &pPP2->DeviceSettings[ii];
4299 data = (le16_to_cpu(pdevice->DeviceFlags) << 16) |
4300 (pdevice->SyncFactor << 8) | pdevice->Timeout;
4301 ioc->spi_data.nvram[ii] = data;
4302 }
4303 }
4304
4305 pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
4306 }
4307 }
4308
4309 /* Update Adapter limits with those from NVRAM
4310 * Comment: Don't need to do this. Target performance
4311 	 * parameters will never exceed the adapter's limits.
4312 */
4313
4314 return rc;
4315}
4316
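/*
 *	Sketch (not compiled): format of the per-target word cached in
 *	ioc->spi_data.nvram[] by mpt_GetScsiPortSettings() above:
 *	DeviceFlags in bits 31:16, SyncFactor in bits 15:8, Timeout in
 *	bits 7:0.  Decoding one entry for a hypothetical target "id":
 */
#if 0
	int nv           = ioc->spi_data.nvram[id];
	u16 device_flags = (nv >> 16) & 0xFFFF;
	u8  sync_factor  = (nv >> 8) & 0xFF;
	u8  timeout      = nv & 0xFF;
#endif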
4317/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4318 /* mpt_readScsiDevicePageHeaders - save version and length of SDP0 and SDP1
4319  *	@ioc: Pointer to MPT_ADAPTER structure
4320 * @portnum: IOC port number
4321 *
4322 * Return: -EFAULT if read of config page header fails
4323 * or 0 if success.
4324 */
4325static int
4326mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
4327{
4328 CONFIGPARMS cfg;
4329 ConfigPageHeader_t header;
4330
4331 /* Read the SCSI Device Page 1 header
4332 */
4333 header.PageVersion = 0;
4334 header.PageLength = 0;
4335 header.PageNumber = 1;
4336 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
4337 cfg.hdr = &header;
4338 cfg.physAddr = -1;
4339 cfg.pageAddr = portnum;
4340 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4341 cfg.dir = 0;
4342 cfg.timeout = 0;
4343 if (mpt_config(ioc, &cfg) != 0)
4344 return -EFAULT;
4345
4346 ioc->spi_data.sdp1version = cfg.hdr->PageVersion;
4347 ioc->spi_data.sdp1length = cfg.hdr->PageLength;
4348
4349 header.PageVersion = 0;
4350 header.PageLength = 0;
4351 header.PageNumber = 0;
4352 header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
4353 if (mpt_config(ioc, &cfg) != 0)
4354 return -EFAULT;
4355
4356 ioc->spi_data.sdp0version = cfg.hdr->PageVersion;
4357 ioc->spi_data.sdp0length = cfg.hdr->PageLength;
4358
4359 dcprintk((MYIOC_s_INFO_FMT "Headers: 0: version %d length %d\n",
4360 ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));
4361
4362 dcprintk((MYIOC_s_INFO_FMT "Headers: 1: version %d length %d\n",
4363 ioc->name, ioc->spi_data.sdp1version, ioc->spi_data.sdp1length));
4364 return 0;
4365}
4366
4367/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4368/**
4369 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
4370  *	@ioc: Pointer to MPT_ADAPTER structure
4372 *
4373 * Return:
4374 * 0 on success
4375  *	-EFAULT if read of config page header fails or page length is zero
4376 * -ENOMEM if pci_alloc failed
4377 */
4378int
4379mpt_findImVolumes(MPT_ADAPTER *ioc)
4380{
4381 IOCPage2_t *pIoc2;
4382 u8 *mem;
4383 ConfigPageIoc2RaidVol_t *pIocRv;
4384 dma_addr_t ioc2_dma;
4385 CONFIGPARMS cfg;
4386 ConfigPageHeader_t header;
4387 int jj;
4388 int rc = 0;
4389 int iocpage2sz;
4390 u8 nVols, nPhys;
4391 u8 vid, vbus, vioc;
4392
4393 /* Read IOCP2 header then the page.
4394 */
4395 header.PageVersion = 0;
4396 header.PageLength = 0;
4397 header.PageNumber = 2;
4398 header.PageType = MPI_CONFIG_PAGETYPE_IOC;
4399 cfg.hdr = &header;
4400 cfg.physAddr = -1;
4401 cfg.pageAddr = 0;
4402 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4403 cfg.dir = 0;
4404 cfg.timeout = 0;
4405 if (mpt_config(ioc, &cfg) != 0)
4406 return -EFAULT;
4407
4408 if (header.PageLength == 0)
4409 return -EFAULT;
4410
4411 iocpage2sz = header.PageLength * 4;
4412 pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma);
4413 if (!pIoc2)
4414 return -ENOMEM;
4415
4416 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4417 cfg.physAddr = ioc2_dma;
4418 if (mpt_config(ioc, &cfg) != 0)
4419 goto done_and_free;
4420
4421 if ( (mem = (u8 *)ioc->spi_data.pIocPg2) == NULL ) {
4422 mem = kmalloc(iocpage2sz, GFP_ATOMIC);
4423 if (mem) {
4424 ioc->spi_data.pIocPg2 = (IOCPage2_t *) mem;
4425 } else {
4426 goto done_and_free;
4427 }
4428 }
4429 memcpy(mem, (u8 *)pIoc2, iocpage2sz);
4430
4431 /* Identify RAID Volume Id's */
4432 nVols = pIoc2->NumActiveVolumes;
4433 if ( nVols == 0) {
4434 /* No RAID Volume.
4435 */
4436 goto done_and_free;
4437 } else {
4438 /* At least 1 RAID Volume
4439 */
4440 pIocRv = pIoc2->RaidVolume;
4441 ioc->spi_data.isRaid = 0;
4442 for (jj = 0; jj < nVols; jj++, pIocRv++) {
4443 vid = pIocRv->VolumeID;
4444 vbus = pIocRv->VolumeBus;
4445 vioc = pIocRv->VolumeIOC;
4446
4447 /* find the match
4448 */
4449 if (vbus == 0) {
4450 ioc->spi_data.isRaid |= (1 << vid);
4451 } else {
4452 /* Error! Always bus 0
4453 */
4454 }
4455 }
4456 }
4457
4458 /* Identify Hidden Physical Disk Id's */
4459 nPhys = pIoc2->NumActivePhysDisks;
4460 if (nPhys == 0) {
4461 /* No physical disks.
4462 */
4463 } else {
4464 mpt_read_ioc_pg_3(ioc);
4465 }
4466
4467done_and_free:
4468 pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma);
4469
4470 return rc;
4471}
4472
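/*
 *	Sketch (not compiled): mpt_findImVolumes() above records the active
 *	RAID volumes as a bitmap keyed by VolumeID, so a caller can test a
 *	SCSI target id (hypothetical "id") like this:
 */
#if 0
	if (ioc->spi_data.isRaid & (1 << id)) {
		/* target "id" is a RAID volume; the physical disks behind
		 * it are hidden and described by IOC Page 3
		 */
	}
#endif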
4473int
4474mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
4475{
4476 IOCPage3_t *pIoc3;
4477 u8 *mem;
4478 CONFIGPARMS cfg;
4479 ConfigPageHeader_t header;
4480 dma_addr_t ioc3_dma;
4481 int iocpage3sz = 0;
4482
4483 /* Free the old page
4484 */
4485 if (ioc->spi_data.pIocPg3) {
4486 kfree(ioc->spi_data.pIocPg3);
4487 ioc->spi_data.pIocPg3 = NULL;
4488 }
4489
4490 /* There is at least one physical disk.
4491 * Read and save IOC Page 3
4492 */
4493 header.PageVersion = 0;
4494 header.PageLength = 0;
4495 header.PageNumber = 3;
4496 header.PageType = MPI_CONFIG_PAGETYPE_IOC;
4497 cfg.hdr = &header;
4498 cfg.physAddr = -1;
4499 cfg.pageAddr = 0;
4500 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4501 cfg.dir = 0;
4502 cfg.timeout = 0;
4503 if (mpt_config(ioc, &cfg) != 0)
4504 return 0;
4505
4506 if (header.PageLength == 0)
4507 return 0;
4508
4509 /* Read Header good, alloc memory
4510 */
4511 iocpage3sz = header.PageLength * 4;
4512 pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma);
4513 if (!pIoc3)
4514 return 0;
4515
4516 /* Read the Page and save the data
4517 * into malloc'd memory.
4518 */
4519 cfg.physAddr = ioc3_dma;
4520 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4521 if (mpt_config(ioc, &cfg) == 0) {
4522 mem = kmalloc(iocpage3sz, GFP_ATOMIC);
4523 if (mem) {
4524 memcpy(mem, (u8 *)pIoc3, iocpage3sz);
4525 ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem;
4526 }
4527 }
4528
4529 pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma);
4530
4531 return 0;
4532}
4533
4534static void
4535mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
4536{
4537 IOCPage4_t *pIoc4;
4538 CONFIGPARMS cfg;
4539 ConfigPageHeader_t header;
4540 dma_addr_t ioc4_dma;
4541 int iocpage4sz;
4542
4543 /* Read and save IOC Page 4
4544 */
4545 header.PageVersion = 0;
4546 header.PageLength = 0;
4547 header.PageNumber = 4;
4548 header.PageType = MPI_CONFIG_PAGETYPE_IOC;
4549 cfg.hdr = &header;
4550 cfg.physAddr = -1;
4551 cfg.pageAddr = 0;
4552 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4553 cfg.dir = 0;
4554 cfg.timeout = 0;
4555 if (mpt_config(ioc, &cfg) != 0)
4556 return;
4557
4558 if (header.PageLength == 0)
4559 return;
4560
4561 if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
4562 iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */
4563 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
4564 if (!pIoc4)
4565 return;
4566 } else {
4567 ioc4_dma = ioc->spi_data.IocPg4_dma;
4568 iocpage4sz = ioc->spi_data.IocPg4Sz;
4569 }
4570
4571 /* Read the Page into dma memory.
4572 */
4573 cfg.physAddr = ioc4_dma;
4574 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4575 if (mpt_config(ioc, &cfg) == 0) {
4576 ioc->spi_data.pIocPg4 = (IOCPage4_t *) pIoc4;
4577 ioc->spi_data.IocPg4_dma = ioc4_dma;
4578 ioc->spi_data.IocPg4Sz = iocpage4sz;
4579 } else {
4580 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
4581 ioc->spi_data.pIocPg4 = NULL;
4582 }
4583}
4584
4585static void
4586mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
4587{
4588 IOCPage1_t *pIoc1;
4589 CONFIGPARMS cfg;
4590 ConfigPageHeader_t header;
4591 dma_addr_t ioc1_dma;
4592 int iocpage1sz = 0;
4593 u32 tmp;
4594
4595 /* Check the Coalescing Timeout in IOC Page 1
4596 */
4597 header.PageVersion = 0;
4598 header.PageLength = 0;
4599 header.PageNumber = 1;
4600 header.PageType = MPI_CONFIG_PAGETYPE_IOC;
4601 cfg.hdr = &header;
4602 cfg.physAddr = -1;
4603 cfg.pageAddr = 0;
4604 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
4605 cfg.dir = 0;
4606 cfg.timeout = 0;
4607 if (mpt_config(ioc, &cfg) != 0)
4608 return;
4609
4610 if (header.PageLength == 0)
4611 return;
4612
4613 /* Read Header good, alloc memory
4614 */
4615 iocpage1sz = header.PageLength * 4;
4616 pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma);
4617 if (!pIoc1)
4618 return;
4619
4620 /* Read the Page and check coalescing timeout
4621 */
4622 cfg.physAddr = ioc1_dma;
4623 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
4624 if (mpt_config(ioc, &cfg) == 0) {
4625
4626 tmp = le32_to_cpu(pIoc1->Flags) & MPI_IOCPAGE1_REPLY_COALESCING;
4627 if (tmp == MPI_IOCPAGE1_REPLY_COALESCING) {
4628 tmp = le32_to_cpu(pIoc1->CoalescingTimeout);
4629
4630 dprintk((MYIOC_s_INFO_FMT "Coalescing Enabled Timeout = %d\n",
4631 ioc->name, tmp));
4632
4633 if (tmp > MPT_COALESCING_TIMEOUT) {
4634 pIoc1->CoalescingTimeout = cpu_to_le32(MPT_COALESCING_TIMEOUT);
4635
4636 /* Write NVRAM and current
4637 */
4638 cfg.dir = 1;
4639 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
4640 if (mpt_config(ioc, &cfg) == 0) {
4641 dprintk((MYIOC_s_INFO_FMT "Reset Current Coalescing Timeout to = %d\n",
4642 ioc->name, MPT_COALESCING_TIMEOUT));
4643
4644 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM;
4645 if (mpt_config(ioc, &cfg) == 0) {
4646 dprintk((MYIOC_s_INFO_FMT "Reset NVRAM Coalescing Timeout to = %d\n",
4647 ioc->name, MPT_COALESCING_TIMEOUT));
4648 } else {
4649 dprintk((MYIOC_s_INFO_FMT "Reset NVRAM Coalescing Timeout Failed\n",
4650 ioc->name));
4651 }
4652
4653 } else {
4654 dprintk((MYIOC_s_WARN_FMT "Reset of Current Coalescing Timeout Failed!\n",
4655 ioc->name));
4656 }
4657 }
4658
4659 } else {
4660 dprintk((MYIOC_s_WARN_FMT "Coalescing Disabled\n", ioc->name));
4661 }
4662 }
4663
4664 pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma);
4665
4666 return;
4667}
4668
4669/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4670/*
4671 * SendEventNotification - Send EventNotification (on or off) request
4672 * to MPT adapter.
4673 * @ioc: Pointer to MPT_ADAPTER structure
4674 * @EvSwitch: Event switch flags
4675 */
4676static int
4677SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch)
4678{
4679 EventNotification_t *evnp;
4680
4681 evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc);
4682 if (evnp == NULL) {
4683 dprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
4684 ioc->name));
4685 return 0;
4686 }
4687 memset(evnp, 0, sizeof(*evnp));
4688
4689 dprintk((MYIOC_s_INFO_FMT "Sending EventNotification(%d)\n", ioc->name, EvSwitch));
4690
4691 evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
4692 evnp->ChainOffset = 0;
4693 evnp->MsgFlags = 0;
4694 evnp->Switch = EvSwitch;
4695
4696 mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp);
4697
4698 return 0;
4699}
4700
4701/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4702/**
4703 * SendEventAck - Send EventAck request to MPT adapter.
4704 * @ioc: Pointer to MPT_ADAPTER structure
4705 * @evnp: Pointer to original EventNotification request
4706 */
4707static int
4708SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
4709{
4710 EventAck_t *pAck;
4711
4712 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4713 printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK request frame!\n",
4714 ioc->name);
4715 return -1;
4716 }
4717 memset(pAck, 0, sizeof(*pAck));
4718
4719 dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name));
4720
4721 pAck->Function = MPI_FUNCTION_EVENT_ACK;
4722 pAck->ChainOffset = 0;
4723 pAck->MsgFlags = 0;
4724 pAck->Event = evnp->Event;
4725 pAck->EventContext = evnp->EventContext;
4726
4727 mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)pAck);
4728
4729 return 0;
4730}
4731
4732/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4733/**
4734 * mpt_config - Generic function to issue config message
4735 * @ioc - Pointer to an adapter structure
4736 * @cfg - Pointer to a configuration structure. Struct contains
4737 * action, page address, direction, physical address
4738 * and pointer to a configuration page header
4739 * Page header is updated.
4740 *
4741 * Returns 0 for success
4742 * -EPERM if not allowed due to ISR context
4743 * -EAGAIN if no msg frames currently available
4744 * -EFAULT for non-successful reply or no reply (timeout)
4745 */
4746int
4747mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
4748{
4749 Config_t *pReq;
4750 MPT_FRAME_HDR *mf;
4751 unsigned long flags;
4752 int ii, rc;
4753 u32 flagsLength;
4754 int in_isr;
4755
4756 /* (Bugzilla:fibrebugs, #513)
4757 * Bug fix (part 1)! 20010905 -sralston
4758 * Prevent calling wait_event() (below), if caller happens
4759 * to be in ISR context, because that is fatal!
4760 */
4761 in_isr = in_interrupt();
4762 if (in_isr) {
4763 dcprintk((MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
4764 ioc->name));
4765 return -EPERM;
4766 }
4767
4768 /* Get and Populate a free Frame
4769 */
4770 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4771 dcprintk((MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n",
4772 ioc->name));
4773 return -EAGAIN;
4774 }
4775 pReq = (Config_t *)mf;
4776 pReq->Action = pCfg->action;
4777 pReq->Reserved = 0;
4778 pReq->ChainOffset = 0;
4779 pReq->Function = MPI_FUNCTION_CONFIG;
4780 pReq->ExtPageLength = 0;
4781 pReq->ExtPageType = 0;
4782 pReq->MsgFlags = 0;
4783 for (ii=0; ii < 8; ii++)
4784 pReq->Reserved2[ii] = 0;
4785
4786 pReq->Header.PageVersion = pCfg->hdr->PageVersion;
4787 pReq->Header.PageLength = pCfg->hdr->PageLength;
4788 pReq->Header.PageNumber = pCfg->hdr->PageNumber;
4789 pReq->Header.PageType = (pCfg->hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
4790 pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);
4791
4792 /* Add a SGE to the config request.
4793 */
4794 if (pCfg->dir)
4795 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
4796 else
4797 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
4798
4799 flagsLength |= pCfg->hdr->PageLength * 4;
4800
4801 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
4802
4803 dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
4804 ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
4805
4806 /* Append pCfg pointer to end of mf
4807 */
4808 *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
4809
4810	/* Initialize the timer
4811 */
4812 init_timer(&pCfg->timer);
4813 pCfg->timer.data = (unsigned long) ioc;
4814 pCfg->timer.function = mpt_timer_expired;
4815 pCfg->wait_done = 0;
4816
4817 /* Set the timer; ensure 10 second minimum */
4818 if (pCfg->timeout < 10)
4819 pCfg->timer.expires = jiffies + HZ*10;
4820 else
4821 pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
4822
4823 /* Add to end of Q, set timer and then issue this command */
4824 spin_lock_irqsave(&ioc->FreeQlock, flags);
4825 list_add_tail(&pCfg->linkage, &ioc->configQ);
4826 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
4827
4828 add_timer(&pCfg->timer);
4829 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4830 wait_event(mpt_waitq, pCfg->wait_done);
4831
4832 /* mf has been freed - do not access */
4833
4834 rc = pCfg->status;
4835
4836 return rc;
4837}
4838
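/*
 * Usage sketch for mpt_config(): the two-step pattern the callers above
 * follow - fetch the page header first to learn PageLength, then read the
 * page into a DMA-able buffer.  This is a minimal sketch, assuming an
 * MPT_ADAPTER *ioc is in scope; "hdr", "cfg", "pg1" and "pg1_dma" are
 * local placeholder names, not driver symbols.
 *
 *	ConfigPageHeader_t	 hdr;
 *	CONFIGPARMS		 cfg;
 *	IOCPage1_t		*pg1;
 *	dma_addr_t		 pg1_dma;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.PageNumber = 1;
 *	hdr.PageType   = MPI_CONFIG_PAGETYPE_IOC;
 *	cfg.hdr      = &hdr;
 *	cfg.physAddr = -1;
 *	cfg.pageAddr = 0;
 *	cfg.action   = MPI_CONFIG_ACTION_PAGE_HEADER;
 *	cfg.dir      = 0;
 *	cfg.timeout  = 0;			(0 means the 10 second minimum)
 *	if (mpt_config(ioc, &cfg) == 0 && hdr.PageLength) {
 *		pg1 = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &pg1_dma);
 *		if (pg1) {
 *			cfg.physAddr = pg1_dma;
 *			cfg.action   = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 *			if (mpt_config(ioc, &cfg) == 0) {
 *				... fields of *pg1 are little-endian ...
 *			}
 *			pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pg1, pg1_dma);
 *		}
 *	}
 */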
4839/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4840/**
4841 * mpt_toolbox - Generic function to issue toolbox message
4842 * @ioc - Pointer to an adapter structure
4843 *	@cfg - Pointer to a toolbox structure. Struct contains
4844 *	  the tool number (in the action field), the transfer direction
4845 *	  and the physical address of the caller's data buffer.
4846 *	  The configuration page header is not used here.
4847 *
4848 * Returns 0 for success
4849 * -EPERM if not allowed due to ISR context
4850 * -EAGAIN if no msg frames currently available
4851 * -EFAULT for non-successful reply or no reply (timeout)
4852 */
4853int
4854mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
4855{
4856 ToolboxIstwiReadWriteRequest_t *pReq;
4857 MPT_FRAME_HDR *mf;
4858 struct pci_dev *pdev;
4859 unsigned long flags;
4860 int rc;
4861 u32 flagsLength;
4862 int in_isr;
4863
4864 /* (Bugzilla:fibrebugs, #513)
4865 * Bug fix (part 1)! 20010905 -sralston
4866 * Prevent calling wait_event() (below), if caller happens
4867 * to be in ISR context, because that is fatal!
4868 */
4869 in_isr = in_interrupt();
4870 if (in_isr) {
4871		dcprintk((MYIOC_s_WARN_FMT "toolbox request not allowed in ISR context!\n",
4872 ioc->name));
4873 return -EPERM;
4874 }
4875
4876 /* Get and Populate a free Frame
4877 */
4878 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4879 dcprintk((MYIOC_s_WARN_FMT "mpt_toolbox: no msg frames!\n",
4880 ioc->name));
4881 return -EAGAIN;
4882 }
4883 pReq = (ToolboxIstwiReadWriteRequest_t *)mf;
4884 pReq->Tool = pCfg->action;
4885 pReq->Reserved = 0;
4886 pReq->ChainOffset = 0;
4887 pReq->Function = MPI_FUNCTION_TOOLBOX;
4888 pReq->Reserved1 = 0;
4889 pReq->Reserved2 = 0;
4890 pReq->MsgFlags = 0;
4891 pReq->Flags = pCfg->dir;
4892 pReq->BusNum = 0;
4893 pReq->Reserved3 = 0;
4894 pReq->NumAddressBytes = 0x01;
4895 pReq->Reserved4 = 0;
4896 pReq->DataLength = 0x04;
4897 pdev = (struct pci_dev *) ioc->pcidev;
4898 if (pdev->devfn & 1)
4899 pReq->DeviceAddr = 0xB2;
4900 else
4901 pReq->DeviceAddr = 0xB0;
4902 pReq->Addr1 = 0;
4903 pReq->Addr2 = 0;
4904 pReq->Addr3 = 0;
4905 pReq->Reserved5 = 0;
4906
4907 /* Add a SGE to the config request.
4908 */
4909
4910 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 4;
4911
4912 mpt_add_sge((char *)&pReq->SGL, flagsLength, pCfg->physAddr);
4913
4914 dcprintk((MYIOC_s_INFO_FMT "Sending Toolbox request, Tool=%x\n",
4915 ioc->name, pReq->Tool));
4916
4917 /* Append pCfg pointer to end of mf
4918 */
4919 *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
4920
4921	/* Initialize the timer
4922 */
4923 init_timer(&pCfg->timer);
4924 pCfg->timer.data = (unsigned long) ioc;
4925 pCfg->timer.function = mpt_timer_expired;
4926 pCfg->wait_done = 0;
4927
4928 /* Set the timer; ensure 10 second minimum */
4929 if (pCfg->timeout < 10)
4930 pCfg->timer.expires = jiffies + HZ*10;
4931 else
4932 pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
4933
4934 /* Add to end of Q, set timer and then issue this command */
4935 spin_lock_irqsave(&ioc->FreeQlock, flags);
4936 list_add_tail(&pCfg->linkage, &ioc->configQ);
4937 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
4938
4939 add_timer(&pCfg->timer);
4940 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4941 wait_event(mpt_waitq, pCfg->wait_done);
4942
4943 /* mf has been freed - do not access */
4944
4945 rc = pCfg->status;
4946
4947 return rc;
4948}
4949
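/*
 * Usage sketch for mpt_toolbox(): the caller supplies the Tool number in
 * cfg.action and a DMA buffer for the 4 data bytes requested above.  This
 * is a minimal sketch, assuming an MPT_ADAPTER *ioc is in scope;
 * "istwi_buf"/"istwi_dma" are placeholder names and
 * MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL is assumed to be the ISTWI Tool
 * define from lsi/mpi_tool.h.
 *
 *	CONFIGPARMS	 cfg;
 *	u8		*istwi_buf;
 *	dma_addr_t	 istwi_dma;
 *
 *	istwi_buf = pci_alloc_consistent(ioc->pcidev, 4, &istwi_dma);
 *	if (istwi_buf) {
 *		cfg.action   = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
 *		cfg.dir      = 0;		(0 => read from the ISTWI device)
 *		cfg.physAddr = istwi_dma;
 *		cfg.timeout  = 10;
 *		if (mpt_toolbox(ioc, &cfg) == 0) {
 *			... istwi_buf[0..3] hold the returned data ...
 *		}
 *		pci_free_consistent(ioc->pcidev, 4, istwi_buf, istwi_dma);
 *	}
 */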
4950/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4951/*
4952 * mpt_timer_expired - Call back for timer process.
4953 *	Used only by the internal config functionality.
4954 *	@data: Pointer to MPT_ADAPTER recast as an unsigned long
4955 */
4956static void
4957mpt_timer_expired(unsigned long data)
4958{
4959 MPT_ADAPTER *ioc = (MPT_ADAPTER *) data;
4960
4961 dcprintk((MYIOC_s_WARN_FMT "mpt_timer_expired! \n", ioc->name));
4962
4963 /* Perform a FW reload */
4964 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
4965 printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
4966
4967 /* No more processing.
4968 * Hard reset clean-up will wake up
4969 * process and free all resources.
4970 */
4971 dcprintk((MYIOC_s_WARN_FMT "mpt_timer_expired complete!\n", ioc->name));
4972
4973 return;
4974}
4975
4976/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4977/*
4978 * mpt_ioc_reset - Base cleanup for hard reset
4979 * @ioc: Pointer to the adapter structure
4980 * @reset_phase: Indicates pre- or post-reset functionality
4981 *
4982 *	Remark: Frees resources associated with internally generated commands.
4983 */
4984static int
4985mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
4986{
4987 CONFIGPARMS *pCfg;
4988 unsigned long flags;
4989
4990 dprintk((KERN_WARNING MYNAM
4991 ": IOC %s_reset routed to MPT base driver!\n",
4992 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
4993 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
4994
4995 if (reset_phase == MPT_IOC_SETUP_RESET) {
4996 ;
4997 } else if (reset_phase == MPT_IOC_PRE_RESET) {
4998 /* If the internal config Q is not empty -
4999 * delete timer. MF resources will be freed when
5000 * the FIFO's are primed.
5001 */
5002 spin_lock_irqsave(&ioc->FreeQlock, flags);
5003 list_for_each_entry(pCfg, &ioc->configQ, linkage)
5004 del_timer(&pCfg->timer);
5005 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
5006
5007 } else {
5008 CONFIGPARMS *pNext;
5009
5010 /* Search the configQ for internal commands.
5011 * Flush the Q, and wake up all suspended threads.
5012 */
5013 spin_lock_irqsave(&ioc->FreeQlock, flags);
5014 list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) {
5015 list_del(&pCfg->linkage);
5016
5017 pCfg->status = MPT_CONFIG_ERROR;
5018 pCfg->wait_done = 1;
5019 wake_up(&mpt_waitq);
5020 }
5021 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
5022 }
5023
5024 return 1; /* currently means nothing really */
5025}
5026
5027
5028#ifdef CONFIG_PROC_FS /* { */
5029/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5030/*
5031 * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
5032 */
5033/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5034/*
5035 * procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
5036 *
5037 * Returns 0 for success, non-zero for failure.
5038 */
5039static int
5040procmpt_create(void)
5041{
5042 struct proc_dir_entry *ent;
5043
5044 mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL);
5045 if (mpt_proc_root_dir == NULL)
5046 return -ENOTDIR;
5047
5048 ent = create_proc_entry("summary", S_IFREG|S_IRUGO, mpt_proc_root_dir);
5049 if (ent)
5050 ent->read_proc = procmpt_summary_read;
5051
5052 ent = create_proc_entry("version", S_IFREG|S_IRUGO, mpt_proc_root_dir);
5053 if (ent)
5054 ent->read_proc = procmpt_version_read;
5055
5056 return 0;
5057}
5058
5059/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5060/*
5061 * procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
5062 *
5063 *	Returns nothing.
5064 */
5065static void
5066procmpt_destroy(void)
5067{
5068 remove_proc_entry("version", mpt_proc_root_dir);
5069 remove_proc_entry("summary", mpt_proc_root_dir);
5070 remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL);
5071}
5072
5073/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5074/*
5075 * procmpt_summary_read - Handle read request from /proc/mpt/summary
5076 * or from /proc/mpt/iocN/summary.
5077 * @buf: Pointer to area to write information
5078 * @start: Pointer to start pointer
5079 * @offset: Offset to start writing
5080 *	@request: Maximum number of bytes to return
5081 *	@eof: Pointer to EOF integer
5082 *	@data: Pointer to MPT_ADAPTER (NULL for the all-adapters summary)
5083 *
5084 * Returns number of characters written to process performing the read.
5085 */
5086static int
5087procmpt_summary_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
5088{
5089 MPT_ADAPTER *ioc;
5090 char *out = buf;
5091 int len;
5092
5093 if (data) {
5094 int more = 0;
5095
5096 ioc = data;
5097 mpt_print_ioc_summary(ioc, out, &more, 0, 1);
5098
5099 out += more;
5100 } else {
5101 list_for_each_entry(ioc, &ioc_list, list) {
5102 int more = 0;
5103
5104 mpt_print_ioc_summary(ioc, out, &more, 0, 1);
5105
5106 out += more;
5107 if ((out-buf) >= request)
5108 break;
5109 }
5110 }
5111
5112 len = out - buf;
5113
5114 MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
5115}
5116
5117/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5118/*
5119 * procmpt_version_read - Handle read request from /proc/mpt/version.
5120 * @buf: Pointer to area to write information
5121 * @start: Pointer to start pointer
5122 * @offset: Offset to start writing
5123 *	@request: Maximum number of bytes to return
5124 *	@eof: Pointer to EOF integer
5125 *	@data: Pointer (unused)
5126 *
5127 * Returns number of characters written to process performing the read.
5128 */
5129static int
5130procmpt_version_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
5131{
5132 int ii;
5133 int scsi, lan, ctl, targ, dmp;
5134 char *drvname;
5135 int len;
5136
5137 len = sprintf(buf, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
5138 len += sprintf(buf+len, " Fusion MPT base driver\n");
5139
5140 scsi = lan = ctl = targ = dmp = 0;
5141 for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
5142 drvname = NULL;
5143 if (MptCallbacks[ii]) {
5144 switch (MptDriverClass[ii]) {
5145 case MPTSCSIH_DRIVER:
5146 if (!scsi++) drvname = "SCSI host";
5147 break;
5148 case MPTLAN_DRIVER:
5149 if (!lan++) drvname = "LAN";
5150 break;
5151 case MPTSTM_DRIVER:
5152 if (!targ++) drvname = "SCSI target";
5153 break;
5154 case MPTCTL_DRIVER:
5155 if (!ctl++) drvname = "ioctl";
5156 break;
5157 }
5158
5159 if (drvname)
5160 len += sprintf(buf+len, " Fusion MPT %s driver\n", drvname);
5161 }
5162 }
5163
5164 MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
5165}
5166
5167/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5168/*
5169 * procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info.
5170 * @buf: Pointer to area to write information
5171 * @start: Pointer to start pointer
5172 * @offset: Offset to start writing
5173 *	@request: Maximum number of bytes to return
5174 *	@eof: Pointer to EOF integer
5175 *	@data: Pointer to MPT_ADAPTER
5176 *
5177 * Returns number of characters written to process performing the read.
5178 */
5179static int
5180procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
5181{
5182 MPT_ADAPTER *ioc = data;
5183 int len;
5184 char expVer[32];
5185 int sz;
5186 int p;
5187
5188 mpt_get_fw_exp_ver(expVer, ioc);
5189
5190 len = sprintf(buf, "%s:", ioc->name);
5191 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
5192 len += sprintf(buf+len, " (f/w download boot flag set)");
5193// if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL)
5194// len += sprintf(buf+len, " CONFIG_CHECKSUM_FAIL!");
5195
5196 len += sprintf(buf+len, "\n ProductID = 0x%04x (%s)\n",
5197 ioc->facts.ProductID,
5198 ioc->prod_name);
5199 len += sprintf(buf+len, " FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer);
5200 if (ioc->facts.FWImageSize)
5201 len += sprintf(buf+len, " (fw_size=%d)", ioc->facts.FWImageSize);
5202 len += sprintf(buf+len, "\n MsgVersion = 0x%04x\n", ioc->facts.MsgVersion);
5203 len += sprintf(buf+len, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit);
5204 len += sprintf(buf+len, " EventState = 0x%02x\n", ioc->facts.EventState);
5205
5206 len += sprintf(buf+len, " CurrentHostMfaHighAddr = 0x%08x\n",
5207 ioc->facts.CurrentHostMfaHighAddr);
5208 len += sprintf(buf+len, " CurrentSenseBufferHighAddr = 0x%08x\n",
5209 ioc->facts.CurrentSenseBufferHighAddr);
5210
5211 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
5212 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
5213
5214 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
5215 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
5216 /*
5217 * Rounding UP to nearest 4-kB boundary here...
5218 */
5219 sz = (ioc->req_sz * ioc->req_depth) + 128;
5220 sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000;
5221 len += sprintf(buf+len, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n",
5222 ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz);
5223 len += sprintf(buf+len, " {MaxReqSz=%d} {MaxReqDepth=%d}\n",
5224 4*ioc->facts.RequestFrameSize,
5225 ioc->facts.GlobalCredits);
5226
5227 len += sprintf(buf+len, " Frames @ 0x%p (Dma @ 0x%p)\n",
5228 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
5229 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
5230 len += sprintf(buf+len, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
5231 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
5232 len += sprintf(buf+len, " {MaxRepSz=%d} {MaxRepDepth=%d}\n",
5233 ioc->facts.CurReplyFrameSize,
5234 ioc->facts.ReplyQueueDepth);
5235
5236 len += sprintf(buf+len, " MaxDevices = %d\n",
5237 (ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices);
5238 len += sprintf(buf+len, " MaxBuses = %d\n", ioc->facts.MaxBuses);
5239
5240 /* per-port info */
5241 for (p=0; p < ioc->facts.NumberOfPorts; p++) {
5242 len += sprintf(buf+len, " PortNumber = %d (of %d)\n",
5243 p+1,
5244 ioc->facts.NumberOfPorts);
5245 if (ioc->bus_type == FC) {
5246 if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
5247 u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
5248 len += sprintf(buf+len, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
5249 a[5], a[4], a[3], a[2], a[1], a[0]);
5250 }
5251 len += sprintf(buf+len, " WWN = %08X%08X:%08X%08X\n",
5252 ioc->fc_port_page0[p].WWNN.High,
5253 ioc->fc_port_page0[p].WWNN.Low,
5254 ioc->fc_port_page0[p].WWPN.High,
5255 ioc->fc_port_page0[p].WWPN.Low);
5256 }
5257 }
5258
5259 MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
5260}
5261
5262#endif /* CONFIG_PROC_FS } */
5263
5264/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5265static void
5266mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc)
5267{
5268 buf[0] ='\0';
5269 if ((ioc->facts.FWVersion.Word >> 24) == 0x0E) {
5270 sprintf(buf, " (Exp %02d%02d)",
5271 (ioc->facts.FWVersion.Word >> 16) & 0x00FF, /* Month */
5272 (ioc->facts.FWVersion.Word >> 8) & 0x1F); /* Day */
5273
5274 /* insider hack! */
5275 if ((ioc->facts.FWVersion.Word >> 8) & 0x80)
5276 strcat(buf, " [MDBG]");
5277 }
5278}
5279
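/*
 * Worked example of the decoding above: FWVersion.Word == 0x0E0B0500 has
 * 0x0E in bits 31-24, month (bits 23-16) = 0x0B = 11 and day (bits 12-8)
 * = 0x05 = 5, so buf becomes " (Exp 1105)".  If bit 15 were also set
 * (0x0E0B8500), " [MDBG]" would be appended as well.
 */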
5280/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5281/**
5282 * mpt_print_ioc_summary - Write ASCII summary of IOC to a buffer.
5283 * @ioc: Pointer to MPT_ADAPTER structure
5284 * @buffer: Pointer to buffer where IOC summary info should be written
5285 * @size: Pointer to number of bytes we wrote (set by this routine)
5286 * @len: Offset at which to start writing in buffer
5287 * @showlan: Display LAN stuff?
5288 *
5289 *	This routine writes (English-readable) ASCII text, which represents
5290 * a summary of IOC information, to a buffer.
5291 */
5292void
5293mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int showlan)
5294{
5295 char expVer[32];
5296 int y;
5297
5298 mpt_get_fw_exp_ver(expVer, ioc);
5299
5300 /*
5301	 * Shorter summary of attached IOCs...
5302 */
5303 y = sprintf(buffer+len, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
5304 ioc->name,
5305 ioc->prod_name,
5306 MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */
5307 ioc->facts.FWVersion.Word,
5308 expVer,
5309 ioc->facts.NumberOfPorts,
5310 ioc->req_depth);
5311
5312 if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
5313 u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
5314 y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
5315 a[5], a[4], a[3], a[2], a[1], a[0]);
5316 }
5317
5318#ifndef __sparc__
5319 y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
5320#else
5321 y += sprintf(buffer+len+y, ", IRQ=%s", __irq_itoa(ioc->pci_irq));
5322#endif
5323
5324 if (!ioc->active)
5325 y += sprintf(buffer+len+y, " (disabled)");
5326
5327 y += sprintf(buffer+len+y, "\n");
5328
5329 *size = y;
5330}
5331
5332/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5333/*
5334 * Reset Handling
5335 */
5336/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5337/**
5338 *	mpt_HardResetHandler - Generic reset handler; issues a hard
5339 *	(diagnostic) reset of the IOC via mpt_do_ioc_recovery, notifying
5340 *	all registered reset handlers along the way.
5341 * @ioc: Pointer to MPT_ADAPTER structure
5342 * @sleepFlag: Indicates if sleep or schedule must be called.
5343 *
5344 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
5345 * or a non-interrupt thread. In the former, must not call schedule().
5346 *
5347 * Remark: A return of -1 is a FATAL error case, as it means a
5348 * FW reload/initialization failed.
5349 *
5350 * Returns 0 for SUCCESS or -1 if FAILED.
5351 */
5352int
5353mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
5354{
5355 int rc;
5356 unsigned long flags;
5357
5358 dtmprintk((MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name));
5359#ifdef MFCNT
5360 printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name);
5361 printk("MF count 0x%x !\n", ioc->mfcnt);
5362#endif
5363
5364 /* Reset the adapter. Prevent more than 1 call to
5365 * mpt_do_ioc_recovery at any instant in time.
5366 */
5367 spin_lock_irqsave(&ioc->diagLock, flags);
5368 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){
5369 spin_unlock_irqrestore(&ioc->diagLock, flags);
5370 return 0;
5371 } else {
5372 ioc->diagPending = 1;
5373 }
5374 spin_unlock_irqrestore(&ioc->diagLock, flags);
5375
5376 /* FIXME: If do_ioc_recovery fails, repeat....
5377 */
5378
5379 /* The SCSI driver needs to adjust timeouts on all current
5380 * commands prior to the diagnostic reset being issued.
5381	 * Prevents timeouts occurring during a diagnostic reset...very bad.
5382 * For all other protocol drivers, this is a no-op.
5383 */
5384 {
5385 int ii;
5386 int r = 0;
5387
5388 for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
5389 if (MptResetHandlers[ii]) {
5390 dtmprintk((MYIOC_s_INFO_FMT "Calling IOC reset_setup handler #%d\n",
5391 ioc->name, ii));
5392 r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_SETUP_RESET);
5393 if (ioc->alt_ioc) {
5394 dtmprintk((MYIOC_s_INFO_FMT "Calling alt-%s setup reset handler #%d\n",
5395 ioc->name, ioc->alt_ioc->name, ii));
5396 r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_SETUP_RESET);
5397 }
5398 }
5399 }
5400 }
5401
5402 if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) {
5403 printk(KERN_WARNING MYNAM ": WARNING - (%d) Cannot recover %s\n",
5404 rc, ioc->name);
5405 }
5406 ioc->reload_fw = 0;
5407 if (ioc->alt_ioc)
5408 ioc->alt_ioc->reload_fw = 0;
5409
5410 spin_lock_irqsave(&ioc->diagLock, flags);
5411 ioc->diagPending = 0;
5412 if (ioc->alt_ioc)
5413 ioc->alt_ioc->diagPending = 0;
5414 spin_unlock_irqrestore(&ioc->diagLock, flags);
5415
5416 dtmprintk((MYIOC_s_INFO_FMT "HardResetHandler rc = %d!\n", ioc->name, rc));
5417
5418 return rc;
5419}
5420
5421/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5422static char *
5423EventDescriptionStr(u8 event, u32 evData0)
5424{
5425 char *ds;
5426
5427 switch(event) {
5428 case MPI_EVENT_NONE:
5429 ds = "None";
5430 break;
5431 case MPI_EVENT_LOG_DATA:
5432 ds = "Log Data";
5433 break;
5434 case MPI_EVENT_STATE_CHANGE:
5435 ds = "State Change";
5436 break;
5437 case MPI_EVENT_UNIT_ATTENTION:
5438 ds = "Unit Attention";
5439 break;
5440 case MPI_EVENT_IOC_BUS_RESET:
5441 ds = "IOC Bus Reset";
5442 break;
5443 case MPI_EVENT_EXT_BUS_RESET:
5444 ds = "External Bus Reset";
5445 break;
5446 case MPI_EVENT_RESCAN:
5447 ds = "Bus Rescan Event";
5448 /* Ok, do we need to do anything here? As far as
5449 I can tell, this is when a new device gets added
5450 to the loop. */
5451 break;
5452 case MPI_EVENT_LINK_STATUS_CHANGE:
5453 if (evData0 == MPI_EVENT_LINK_STATUS_FAILURE)
5454 ds = "Link Status(FAILURE) Change";
5455 else
5456 ds = "Link Status(ACTIVE) Change";
5457 break;
5458 case MPI_EVENT_LOOP_STATE_CHANGE:
5459 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
5460 ds = "Loop State(LIP) Change";
5461 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
5462 ds = "Loop State(LPE) Change"; /* ??? */
5463 else
5464 ds = "Loop State(LPB) Change"; /* ??? */
5465 break;
5466 case MPI_EVENT_LOGOUT:
5467 ds = "Logout";
5468 break;
5469 case MPI_EVENT_EVENT_CHANGE:
5470 if (evData0)
5471 ds = "Events(ON) Change";
5472 else
5473 ds = "Events(OFF) Change";
5474 break;
5475 case MPI_EVENT_INTEGRATED_RAID:
5476 ds = "Integrated Raid";
5477 break;
5478 /*
5479 * MPT base "custom" events may be added here...
5480 */
5481 default:
5482 ds = "Unknown";
5483 break;
5484 }
5485 return ds;
5486}
5487
5488/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5489/*
5490 * ProcessEventNotification - Route a received EventNotificationReply to
5491 *	all currently registered event handlers.
5492 * @ioc: Pointer to MPT_ADAPTER structure
5493 * @pEventReply: Pointer to EventNotification reply frame
5494 * @evHandlers: Pointer to integer, number of event handlers
5495 *
5496 * Returns sum of event handlers return values.
5497 */
5498static int
5499ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply, int *evHandlers)
5500{
5501 u16 evDataLen;
5502 u32 evData0 = 0;
5503// u32 evCtx;
5504 int ii;
5505 int r = 0;
5506 int handlers = 0;
5507 char *evStr;
5508 u8 event;
5509
5510 /*
5511 * Do platform normalization of values
5512 */
5513 event = le32_to_cpu(pEventReply->Event) & 0xFF;
5514// evCtx = le32_to_cpu(pEventReply->EventContext);
5515 evDataLen = le16_to_cpu(pEventReply->EventDataLength);
5516 if (evDataLen) {
5517 evData0 = le32_to_cpu(pEventReply->Data[0]);
5518 }
5519
5520 evStr = EventDescriptionStr(event, evData0);
5521 devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n",
5522 ioc->name,
5523 evStr,
5524 event));
5525
5526#if defined(MPT_DEBUG) || defined(MPT_DEBUG_EVENTS)
5527 printk(KERN_INFO MYNAM ": Event data:\n" KERN_INFO);
5528 for (ii = 0; ii < evDataLen; ii++)
5529 printk(" %08x", le32_to_cpu(pEventReply->Data[ii]));
5530 printk("\n");
5531#endif
5532
5533 /*
5534 * Do general / base driver event processing
5535 */
5536 switch(event) {
5537 case MPI_EVENT_NONE: /* 00 */
5538 case MPI_EVENT_LOG_DATA: /* 01 */
5539 case MPI_EVENT_STATE_CHANGE: /* 02 */
5540 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
5541 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
5542 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
5543 case MPI_EVENT_RESCAN: /* 06 */
5544 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
5545 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
5546 case MPI_EVENT_LOGOUT: /* 09 */
5547 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
5548 case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: /* 0C */
5549 default:
5550 break;
5551 case MPI_EVENT_EVENT_CHANGE: /* 0A */
5552 if (evDataLen) {
5553 u8 evState = evData0 & 0xFF;
5554
5555 /* CHECKME! What if evState unexpectedly says OFF (0)? */
5556
5557 /* Update EventState field in cached IocFacts */
5558 if (ioc->facts.Function) {
5559 ioc->facts.EventState = evState;
5560 }
5561 }
5562 break;
5563 }
5564
5565 /*
5566 * Should this event be logged? Events are written sequentially.
5567 * When buffer is full, start again at the top.
5568 */
5569 if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
5570 int idx;
5571
5572 idx = ioc->eventContext % ioc->eventLogSize;
5573
5574 ioc->events[idx].event = event;
5575 ioc->events[idx].eventContext = ioc->eventContext;
5576
5577 for (ii = 0; ii < 2; ii++) {
5578 if (ii < evDataLen)
5579 ioc->events[idx].data[ii] = le32_to_cpu(pEventReply->Data[ii]);
5580 else
5581 ioc->events[idx].data[ii] = 0;
5582 }
5583
5584 ioc->eventContext++;
5585 }
5586
5587
5588 /*
5589 * Call each currently registered protocol event handler.
5590 */
5591 for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
5592 if (MptEvHandlers[ii]) {
5593 devtprintk((MYIOC_s_INFO_FMT "Routing Event to event handler #%d\n",
5594 ioc->name, ii));
5595 r += (*(MptEvHandlers[ii]))(ioc, pEventReply);
5596 handlers++;
5597 }
5598 }
5599 /* FIXME? Examine results here? */
5600
5601 /*
5602 * If needed, send (a single) EventAck.
5603 */
5604 if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
5605 if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
5606 devtprintk((MYIOC_s_WARN_FMT "SendEventAck returned %d\n",
5607 ioc->name, ii));
5608 }
5609 }
5610
5611 *evHandlers = handlers;
5612 return r;
5613}
5614
5615/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5616/*
5617 * mpt_fc_log_info - Log information returned from Fibre Channel IOC.
5618 * @ioc: Pointer to MPT_ADAPTER structure
5619 * @log_info: U32 LogInfo reply word from the IOC
5620 *
5621 * Refer to lsi/fc_log.h.
5622 */
5623static void
5624mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
5625{
5626 static char *subcl_str[8] = {
5627 "FCP Initiator", "FCP Target", "LAN", "MPI Message Layer",
5628 "FC Link", "Context Manager", "Invalid Field Offset", "State Change Info"
5629 };
5630 u8 subcl = (log_info >> 24) & 0x7;
5631
5632 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubCl={%s}\n",
5633 ioc->name, log_info, subcl_str[subcl]);
5634}
5635
5636/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5637/*
5638 * mpt_sp_log_info - Log information returned from SCSI Parallel IOC.
5639 * @ioc: Pointer to MPT_ADAPTER structure
5641 * @log_info: U32 LogInfo word from the IOC
5642 *
5643 * Refer to lsi/sp_log.h.
5644 */
5645static void
5646mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
5647{
5648 u32 info = log_info & 0x00FF0000;
5649 char *desc = "unknown";
5650
5651 switch (info) {
5652 case 0x00010000:
5653 desc = "bug! MID not found";
5654 if (ioc->reload_fw == 0)
5655 ioc->reload_fw++;
5656 break;
5657
5658 case 0x00020000:
5659 desc = "Parity Error";
5660 break;
5661
5662 case 0x00030000:
5663 desc = "ASYNC Outbound Overrun";
5664 break;
5665
5666 case 0x00040000:
5667 desc = "SYNC Offset Error";
5668 break;
5669
5670 case 0x00050000:
5671 desc = "BM Change";
5672 break;
5673
5674 case 0x00060000:
5675 desc = "Msg In Overflow";
5676 break;
5677
5678 case 0x00070000:
5679 desc = "DMA Error";
5680 break;
5681
5682 case 0x00080000:
5683 desc = "Outbound DMA Overrun";
5684 break;
5685
5686 case 0x00090000:
5687 desc = "Task Management";
5688 break;
5689
5690 case 0x000A0000:
5691 desc = "Device Problem";
5692 break;
5693
5694 case 0x000B0000:
5695 desc = "Invalid Phase Change";
5696 break;
5697
5698 case 0x000C0000:
5699 desc = "Untagged Table Size";
5700 break;
5701
5702 }
5703
5704 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
5705}
5706
5707/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5708/*
5709 * mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
5710 * @ioc: Pointer to MPT_ADAPTER structure
5711 * @ioc_status: U32 IOCStatus word from IOC
5712 * @mf: Pointer to MPT request frame
5713 *
5714 * Refer to lsi/mpi.h.
5715 */
5716static void
5717mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
5718{
5719 u32 status = ioc_status & MPI_IOCSTATUS_MASK;
5720 char *desc = "";
5721
5722 switch (status) {
5723 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
5724 desc = "Invalid Function";
5725 break;
5726
5727 case MPI_IOCSTATUS_BUSY: /* 0x0002 */
5728 desc = "Busy";
5729 break;
5730
5731 case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
5732 desc = "Invalid SGL";
5733 break;
5734
5735 case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
5736 desc = "Internal Error";
5737 break;
5738
5739 case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
5740 desc = "Reserved";
5741 break;
5742
5743 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
5744 desc = "Insufficient Resources";
5745 break;
5746
5747 case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
5748 desc = "Invalid Field";
5749 break;
5750
5751 case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
5752 desc = "Invalid State";
5753 break;
5754
5755 case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
5756 case MPI_IOCSTATUS_CONFIG_INVALID_TYPE: /* 0x0021 */
5757 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: /* 0x0022 */
5758 case MPI_IOCSTATUS_CONFIG_INVALID_DATA: /* 0x0023 */
5759 case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS: /* 0x0024 */
5760 case MPI_IOCSTATUS_CONFIG_CANT_COMMIT: /* 0x0025 */
5761 /* No message for Config IOCStatus values */
5762 break;
5763
5764 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
5765 /* No message for recovered error
5766 desc = "SCSI Recovered Error";
5767 */
5768 break;
5769
5770 case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
5771 desc = "SCSI Invalid Bus";
5772 break;
5773
5774 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
5775 desc = "SCSI Invalid TargetID";
5776 break;
5777
5778 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
5779 {
5780 SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
5781 U8 cdb = pScsiReq->CDB[0];
5782 if (cdb != 0x12) { /* Inquiry is issued for device scanning */
5783 desc = "SCSI Device Not There";
5784 }
5785 break;
5786 }
5787
5788 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
5789 desc = "SCSI Data Overrun";
5790 break;
5791
5792 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
5793 /* This error is checked in scsi_io_done(). Skip.
5794 desc = "SCSI Data Underrun";
5795 */
5796 break;
5797
5798 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
5799 desc = "SCSI I/O Data Error";
5800 break;
5801
5802 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
5803 desc = "SCSI Protocol Error";
5804 break;
5805
5806 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
5807 desc = "SCSI Task Terminated";
5808 break;
5809
5810 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
5811 desc = "SCSI Residual Mismatch";
5812 break;
5813
5814 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
5815 desc = "SCSI Task Management Failed";
5816 break;
5817
5818 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
5819 desc = "SCSI IOC Terminated";
5820 break;
5821
5822 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
5823 desc = "SCSI Ext Terminated";
5824 break;
5825
5826 default:
5827 desc = "Others";
5828 break;
5829 }
5830	if (desc[0] != '\0')
5831 printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc);
5832}
5833
5834/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5835EXPORT_SYMBOL(ioc_list);
5836EXPORT_SYMBOL(mpt_proc_root_dir);
5837EXPORT_SYMBOL(mpt_register);
5838EXPORT_SYMBOL(mpt_deregister);
5839EXPORT_SYMBOL(mpt_event_register);
5840EXPORT_SYMBOL(mpt_event_deregister);
5841EXPORT_SYMBOL(mpt_reset_register);
5842EXPORT_SYMBOL(mpt_reset_deregister);
5843EXPORT_SYMBOL(mpt_device_driver_register);
5844EXPORT_SYMBOL(mpt_device_driver_deregister);
5845EXPORT_SYMBOL(mpt_get_msg_frame);
5846EXPORT_SYMBOL(mpt_put_msg_frame);
5847EXPORT_SYMBOL(mpt_free_msg_frame);
5848EXPORT_SYMBOL(mpt_add_sge);
5849EXPORT_SYMBOL(mpt_send_handshake_request);
5850EXPORT_SYMBOL(mpt_verify_adapter);
5851EXPORT_SYMBOL(mpt_GetIocState);
5852EXPORT_SYMBOL(mpt_print_ioc_summary);
5853EXPORT_SYMBOL(mpt_lan_index);
5854EXPORT_SYMBOL(mpt_stm_index);
5855EXPORT_SYMBOL(mpt_HardResetHandler);
5856EXPORT_SYMBOL(mpt_config);
5857EXPORT_SYMBOL(mpt_toolbox);
5858EXPORT_SYMBOL(mpt_findImVolumes);
5859EXPORT_SYMBOL(mpt_read_ioc_pg_3);
5860EXPORT_SYMBOL(mpt_alloc_fw_memory);
5861EXPORT_SYMBOL(mpt_free_fw_memory);
5862
5863static struct pci_driver mptbase_driver = {
5864 .name = "mptbase",
5865 .id_table = mptbase_pci_table,
5866 .probe = mptbase_probe,
5867 .remove = __devexit_p(mptbase_remove),
5868 .driver = {
5869 .shutdown = mptbase_shutdown,
5870 },
5871#ifdef CONFIG_PM
5872 .suspend = mptbase_suspend,
5873 .resume = mptbase_resume,
5874#endif
5875};
5876
5877/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5878/*
5879 * fusion_init - Fusion MPT base driver initialization routine.
5880 *
5881 * Returns 0 for success, non-zero for failure.
5882 */
5883static int __init
5884fusion_init(void)
5885{
5886 int i;
5887 int r;
5888
5889 show_mptmod_ver(my_NAME, my_VERSION);
5890 printk(KERN_INFO COPYRIGHT "\n");
5891
5892 for (i = 0; i < MPT_MAX_PROTOCOL_DRIVERS; i++) {
5893 MptCallbacks[i] = NULL;
5894 MptDriverClass[i] = MPTUNKNOWN_DRIVER;
5895 MptEvHandlers[i] = NULL;
5896 MptResetHandlers[i] = NULL;
5897 }
5898
5899 /* NEW! 20010120 -sralston
5900 * Register ourselves (mptbase) in order to facilitate
5901 * EventNotification handling.
5902 */
5903 mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER);
5904
5905 /* Register for hard reset handling callbacks.
5906 */
5907 if (mpt_reset_register(mpt_base_index, mpt_ioc_reset) == 0) {
5908 dprintk((KERN_INFO MYNAM ": Register for IOC reset notification\n"));
5909 } else {
5910 /* FIXME! */
5911 }
5912
5913#ifdef CONFIG_PROC_FS
5914 (void) procmpt_create();
5915#endif
5916 r = pci_register_driver(&mptbase_driver);
5917	if (r)
5918		return r;
5919
5920 return r;
5921}
5922
5923/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5924/*
5925 * fusion_exit - Perform driver unload cleanup.
5926 *
5927 * This routine frees all resources associated with each MPT adapter
5928 * and removes all %MPT_PROCFS_MPTBASEDIR entries.
5929 */
5930static void __exit
5931fusion_exit(void)
5932{
5933
5934 dexitprintk((KERN_INFO MYNAM ": fusion_exit() called!\n"));
5935
5936 pci_unregister_driver(&mptbase_driver);
5937 mpt_reset_deregister(mpt_base_index);
5938
5939#ifdef CONFIG_PROC_FS
5940 procmpt_destroy();
5941#endif
5942}
5943
5944
5945module_init(fusion_init);
5946module_exit(fusion_exit);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
new file mode 100644
index 000000000000..be673070bc34
--- /dev/null
+++ b/drivers/message/fusion/mptbase.h
@@ -0,0 +1,1021 @@
1/*
2 * linux/drivers/message/fusion/mptbase.h
3 * High performance SCSI + LAN / Fibre Channel device drivers.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7 *
8 * Credits:
9 * (see mptbase.c)
10 *
11 * Copyright (c) 1999-2004 LSI Logic Corporation
12 * Originally By: Steven J. Ralston
13 * (mailto:sjralston1@netscape.net)
14 * (mailto:mpt_linux_developer@lsil.com)
15 *
16 * $Id: mptbase.h,v 1.144 2003/01/28 21:31:56 pdelaney Exp $
17 */
18/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
19/*
20 This program is free software; you can redistribute it and/or modify
21 it under the terms of the GNU General Public License as published by
22 the Free Software Foundation; version 2 of the License.
23
24 This program is distributed in the hope that it will be useful,
25 but WITHOUT ANY WARRANTY; without even the implied warranty of
26 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 GNU General Public License for more details.
28
29 NO WARRANTY
30 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
31 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
32 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
33 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
34 solely responsible for determining the appropriateness of using and
35 distributing the Program and assumes all risks associated with its
36 exercise of rights under this Agreement, including but not limited to
37 the risks and costs of program errors, damage to or loss of data,
38 programs or equipment, and unavailability or interruption of operations.
39
40 DISCLAIMER OF LIABILITY
41 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
42 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
44 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
45 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
46 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
47 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
48
49 You should have received a copy of the GNU General Public License
50 along with this program; if not, write to the Free Software
51 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
52*/
53
54#ifndef MPTBASE_H_INCLUDED
55#define MPTBASE_H_INCLUDED
56/*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
57
58#include <linux/version.h>
59#include <linux/config.h>
60#include <linux/kernel.h>
61#include <linux/pci.h>
62
63#include "lsi/mpi_type.h"
64#include "lsi/mpi.h" /* Fusion MPI(nterface) basic defs */
65#include "lsi/mpi_ioc.h" /* Fusion MPT IOC(ontroller) defs */
66#include "lsi/mpi_cnfg.h" /* IOC configuration support */
67#include "lsi/mpi_init.h" /* SCSI Host (initiator) protocol support */
68#include "lsi/mpi_lan.h" /* LAN over FC protocol support */
69#include "lsi/mpi_raid.h" /* Integrated Mirroring support */
70
71#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */
72#include "lsi/mpi_targ.h"	/* SCSI/FCP Target protocol support */
73#include "lsi/mpi_tool.h" /* Tools support */
74#include "lsi/fc_log.h"
75
76/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
77
78#ifndef MODULEAUTHOR
79#define MODULEAUTHOR "LSI Logic Corporation"
80#endif
81
82#ifndef COPYRIGHT
83#define COPYRIGHT "Copyright (c) 1999-2004 " MODULEAUTHOR
84#endif
85
86#define MPT_LINUX_VERSION_COMMON "3.01.20"
87#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.01.20"
88#define WHAT_MAGIC_STRING "@" "(" "#" ")"
89
90#define show_mptmod_ver(s,ver) \
91 printk(KERN_INFO "%s %s\n", s, ver);
92
93/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
94/*
95 * Fusion MPT(linux) driver configurable stuff...
96 */
97#define MPT_MAX_ADAPTERS 18
98#define MPT_MAX_PROTOCOL_DRIVERS 16
99#define MPT_MAX_BUS 1 /* Do not change */
100#define MPT_MAX_FC_DEVICES 255
101#define MPT_MAX_SCSI_DEVICES 16
102#define MPT_LAST_LUN 255
103#define MPT_SENSE_BUFFER_ALLOC 64
104 /* allow for 256 max sense alloc, but only 255 max request */
105#if MPT_SENSE_BUFFER_ALLOC >= 256
106# undef MPT_SENSE_BUFFER_ALLOC
107# define MPT_SENSE_BUFFER_ALLOC 256
108# define MPT_SENSE_BUFFER_SIZE 255
109#else
110# define MPT_SENSE_BUFFER_SIZE MPT_SENSE_BUFFER_ALLOC
111#endif
112
113#define MPT_NAME_LENGTH 32
114
115#define MPT_PROCFS_MPTBASEDIR "mpt"
116 /* chg it to "driver/fusion" ? */
117#define MPT_PROCFS_SUMMARY_ALL_NODE MPT_PROCFS_MPTBASEDIR "/summary"
118#define MPT_PROCFS_SUMMARY_ALL_PATHNAME "/proc/" MPT_PROCFS_SUMMARY_ALL_NODE
119#define MPT_FW_REV_MAGIC_ID_STRING "FwRev="
120
121#define MPT_MAX_REQ_DEPTH 1023
122#define MPT_DEFAULT_REQ_DEPTH 256
123#define MPT_MIN_REQ_DEPTH 128
124
125#define MPT_MAX_REPLY_DEPTH MPT_MAX_REQ_DEPTH
126#define MPT_DEFAULT_REPLY_DEPTH 128
127#define MPT_MIN_REPLY_DEPTH 8
128#define MPT_MAX_REPLIES_PER_ISR 32
129
130#define MPT_MAX_FRAME_SIZE 128
131#define MPT_DEFAULT_FRAME_SIZE 128
132
133#define MPT_REPLY_FRAME_SIZE 0x40 /* Must be a multiple of 8 */
134
135#define MPT_SG_REQ_128_SCALE 1
136#define MPT_SG_REQ_96_SCALE 2
137#define MPT_SG_REQ_64_SCALE 4
138
139#define CAN_SLEEP 1
140#define NO_SLEEP 0
141
142#define MPT_COALESCING_TIMEOUT 0x10
143
144/*
145 * SCSI transfer rate defines.
146 */
147#define MPT_ULTRA320 0x08
148#define MPT_ULTRA160 0x09
149#define MPT_ULTRA2 0x0A
150#define MPT_ULTRA 0x0C
151#define MPT_FAST 0x19
152#define MPT_SCSI 0x32
153#define MPT_ASYNC 0xFF
154
155#define MPT_NARROW 0
156#define MPT_WIDE 1
157
158#define C0_1030 0x08
159#define XL_929 0x01
160
161
162/*
163 * Try to keep these at 2^N-1
164 */
165#define MPT_FC_CAN_QUEUE 127
166#define MPT_SCSI_CAN_QUEUE 127
167
168/*
169 * Set the MAX_SGE value based on user input.
170 */
171#ifdef CONFIG_FUSION_MAX_SGE
172#if CONFIG_FUSION_MAX_SGE < 16
173#define MPT_SCSI_SG_DEPTH 16
174#elif CONFIG_FUSION_MAX_SGE > 128
175#define MPT_SCSI_SG_DEPTH 128
176#else
177#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE
178#endif
179#else
180#define MPT_SCSI_SG_DEPTH 40
181#endif
182
183#ifdef __KERNEL__ /* { */
184/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
185
186#include <linux/proc_fs.h>
187
188/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
189/*
190 * Attempt semi-consistent error & warning msgs across
191 * MPT drivers. NOTE: Users of these macro defs must
192 * themselves define their own MYNAM.
193 */
194#define MYIOC_s_INFO_FMT KERN_INFO MYNAM ": %s: "
195#define MYIOC_s_NOTE_FMT KERN_NOTICE MYNAM ": %s: "
196#define MYIOC_s_WARN_FMT KERN_WARNING MYNAM ": %s: WARNING - "
197#define MYIOC_s_ERR_FMT KERN_ERR MYNAM ": %s: ERROR - "
198
199/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
200/*
201 * MPT protocol driver defs...
202 */
203typedef enum {
204 MPTBASE_DRIVER, /* MPT base class */
205 MPTCTL_DRIVER, /* MPT ioctl class */
206 MPTSCSIH_DRIVER, /* MPT SCSI host (initiator) class */
207 MPTLAN_DRIVER, /* MPT LAN class */
208 MPTSTM_DRIVER, /* MPT SCSI target mode class */
209 MPTUNKNOWN_DRIVER
210} MPT_DRIVER_CLASS;
211
212struct mpt_pci_driver{
213 int (*probe) (struct pci_dev *dev, const struct pci_device_id *id);
214 void (*remove) (struct pci_dev *dev);
215 void (*shutdown) (struct device * dev);
216#ifdef CONFIG_PM
217 int (*resume) (struct pci_dev *dev);
218 int (*suspend) (struct pci_dev *dev, u32 state);
219#endif
220};
221
222/*
223 * MPT adapter / port / bus / device info structures...
224 */
225
226typedef union _MPT_FRAME_TRACKER {
227 struct {
228 struct list_head list;
229 u32 arg1;
230 u32 pad;
231 void *argp1;
232 } linkage;
233 /*
234	 * NOTE: When request frames are free, only the linkage structure
235	 * contents are valid.  All other values are invalid.
236	 * In particular, do NOT rely on offset [2]
237	 * (in words) being the message context.
238 * The message context must be reset (computed via base address
239 * + an offset) prior to issuing any command.
240 *
241 * NOTE2: On non-32-bit systems, where pointers are LARGE,
242 * using the linkage pointers destroys our sacred MsgContext
243 * field contents. But we don't care anymore because these
244 * are now reset in mpt_put_msg_frame() just prior to sending
245 * a request off to the IOC.
246 */
247 struct {
248 u32 __hdr[2];
249 /*
250 * The following _MUST_ match the location of the
251 * MsgContext field in the MPT message headers.
252 */
253 union {
254 u32 MsgContext;
255 struct {
256 u16 req_idx; /* Request index */
257 u8 cb_idx; /* callback function index */
258 u8 rsvd;
259 } fld;
260 } msgctxu;
261 } hwhdr;
262 /*
263 * Remark: 32 bit identifier:
264 * 31-24: reserved
265 * 23-16: call back index
266 * 15-0 : request index
267 */
268} MPT_FRAME_TRACKER;
269
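/*
 * Example of the layout above (little-endian hosts, as assumed by this
 * driver): a MsgContext of 0x00021234 viewed through msgctxu.fld gives
 * fld.cb_idx = 0x02 (protocol driver / callback index) and
 * fld.req_idx = 0x1234 (request frame index); bits 31-24 remain zero.
 */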
270/*
271 * We might want to view/access a frame as:
272 * 1) generic request header
273 * 2) SCSIIORequest
274 * 3) SCSIIOReply
275 * 4) MPIDefaultReply
276 * 5) frame tracker
277 */
278typedef struct _MPT_FRAME_HDR {
279 union {
280 MPIHeader_t hdr;
281 SCSIIORequest_t scsireq;
282 SCSIIOReply_t sreply;
283 ConfigReply_t configreply;
284 MPIDefaultReply_t reply;
285 MPT_FRAME_TRACKER frame;
286 } u;
287} MPT_FRAME_HDR;
288
289#define MPT_REQ_MSGFLAGS_DROPME 0x80
290
291typedef struct _MPT_SGL_HDR {
292 SGESimple32_t sge[1];
293} MPT_SGL_HDR;
294
295typedef struct _MPT_SGL64_HDR {
296 SGESimple64_t sge[1];
297} MPT_SGL64_HDR;
298
299/*
300 * System interface register set
301 */
302
303typedef struct _SYSIF_REGS
304{
305 u32 Doorbell; /* 00 System<->IOC Doorbell reg */
306 u32 WriteSequence; /* 04 Write Sequence register */
307 u32 Diagnostic; /* 08 Diagnostic register */
308 u32 TestBase; /* 0C Test Base Address */
309 u32 DiagRwData; /* 10 Read Write Data (fw download) */
310 u32 DiagRwAddress; /* 14 Read Write Address (fw download)*/
311 u32 Reserved1[6]; /* 18-2F reserved for future use */
312 u32 IntStatus; /* 30 Interrupt Status */
313 u32 IntMask; /* 34 Interrupt Mask */
314 u32 Reserved2[2]; /* 38-3F reserved for future use */
315 u32 RequestFifo; /* 40 Request Post/Free FIFO */
316 u32 ReplyFifo; /* 44 Reply Post/Free FIFO */
317 u32 Reserved3[2]; /* 48-4F reserved for future use */
318 u32 HostIndex; /* 50 Host Index register */
319 u32 Reserved4[15]; /* 54-8F */
320 u32 Fubar; /* 90 For Fubar usage */
321 u32 Reserved5[27]; /* 94-FF */
322} SYSIF_REGS;
323
324/*
325 * NOTE: Use MPI_{DOORBELL,WRITESEQ,DIAG}_xxx defs in lsi/mpi.h
326 * in conjunction with SYSIF_REGS accesses!
327 */
328
329
330/*
331 * Dynamic Multi-Pathing specific stuff...
332 */
333
334/* VirtDevice negoFlags field */
335#define MPT_TARGET_NO_NEGO_WIDE 0x01
336#define MPT_TARGET_NO_NEGO_SYNC 0x02
337#define MPT_TARGET_NO_NEGO_QAS 0x04
338#define MPT_TAPE_NEGO_IDP 0x08
339
340/*
341 * VirtDevice - FC LUN device or SCSI target device
342 */
343typedef struct _VirtDevice {
344 struct scsi_device *device;
345 u8 tflags;
346 u8 ioc_id;
347 u8 target_id;
348 u8 bus_id;
349 u8 minSyncFactor; /* 0xFF is async */
350 u8 maxOffset; /* 0 if async */
351 u8 maxWidth; /* 0 if narrow, 1 if wide */
352 u8 negoFlags; /* bit field, see above */
353 u8 raidVolume; /* set, if RAID Volume */
354 u8 type; /* byte 0 of Inquiry data */
355 u8 cflags; /* controller flags */
356 u8 rsvd1raid;
357 u16 fc_phys_lun;
358 u16 fc_xlat_lun;
359 u32 num_luns;
360 u32 luns[8]; /* Max LUNs is 256 */
361 u8 pad[4];
362 u8 inq_data[8];
363 /* IEEE Registered Extended Identifier
364 obtained via INQUIRY VPD page 0x83 */
365 /* NOTE: Do not separate uniq_prepad and uniq_data
366	   as they are treated as a single entity in the code */
367 u8 uniq_prepad[8];
368 u8 uniq_data[20];
369 u8 pad2[4];
370} VirtDevice;
371
372/*
373 * Fibre Channel (SCSI) target device and associated defines...
374 */
375#define MPT_TARGET_DEFAULT_DV_STATUS 0x00
376#define MPT_TARGET_FLAGS_VALID_NEGO 0x01
377#define MPT_TARGET_FLAGS_VALID_INQUIRY 0x02
378#define MPT_TARGET_FLAGS_Q_YES 0x08
379#define MPT_TARGET_FLAGS_VALID_56 0x10
380#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20
381
382/*
383 * /proc/mpt interface
384 */
385typedef struct {
386 const char *name;
387 mode_t mode;
388 int pad;
389 read_proc_t *read_proc;
390 write_proc_t *write_proc;
391} mpt_proc_entry_t;
392
393#define MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len) \
394do { \
395 len -= offset; \
396 if (len < request) { \
397 *eof = 1; \
398 if (len <= 0) \
399 return 0; \
400 } else \
401 len = request; \
402 *start = buf + offset; \
403 return len; \
404} while (0)
405
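/*
 * Usage sketch: a read_proc handler formats into buf and lets the macro
 * do the offset/length/EOF bookkeeping (procmpt_version_read() in
 * mptbase.c follows this pattern).  "example_read_proc" is a placeholder
 * name.
 *
 *	static int example_read_proc(char *buf, char **start, off_t offset,
 *				     int request, int *eof, void *data)
 *	{
 *		int len = sprintf(buf, "hello\n");
 *
 *		MPT_PROC_READ_RETURN(buf, start, offset, request, eof, len);
 *	}
 */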
406
407/*
408 * IOCTL structure and associated defines
409 */
410
411#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/
412#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
413#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */
414#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */
415#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
416#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */
417#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */
418
419#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */
420
421typedef struct _MPT_IOCTL {
422 struct _MPT_ADAPTER *ioc;
423 u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
424 u8 sense[MPT_SENSE_BUFFER_ALLOC];
425 int wait_done; /* wake-up value for this ioc */
426 u8 rsvd;
427 u8 status; /* current command status */
428 u8 reset; /* 1 if bus reset allowed */
429 u8 target; /* target for reset */
430 struct semaphore sem_ioc;
431} MPT_IOCTL;
432
433/*
434 * Event Structure and define
435 */
436#define MPTCTL_EVENT_LOG_SIZE (0x0000000A)
437typedef struct _mpt_ioctl_events {
438 u32 event; /* Specified by define above */
439 u32 eventContext; /* Index or counter */
440 int data[2]; /* First 8 bytes of Event Data */
441} MPT_IOCTL_EVENTS;
442
443/*
444 * CONFIGPARM status defines
445 */
446#define MPT_CONFIG_GOOD MPI_IOCSTATUS_SUCCESS
447#define MPT_CONFIG_ERROR 0x002F
448
449/*
450 * Substructure to store SCSI specific configuration page data
451 */
452 /* dvStatus defines: */
453#define MPT_SCSICFG_NEGOTIATE 0x01 /* Negotiate on next IO */
454#define MPT_SCSICFG_NEED_DV 0x02 /* Schedule DV */
455#define MPT_SCSICFG_DV_PENDING 0x04 /* DV on this physical id pending */
456#define MPT_SCSICFG_DV_NOT_DONE 0x08 /* DV has not been performed */
457#define MPT_SCSICFG_BLK_NEGO 0x10 /* WriteSDP1 with WDTR and SDTR disabled */
458#define MPT_SCSICFG_RELOAD_IOC_PG3 0x20 /* IOC Pg 3 data is obsolete */
459 /* Args passed to writeSDP1: */
460#define MPT_SCSICFG_USE_NVRAM 0x01 /* WriteSDP1 using NVRAM */
461#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */
462/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */
463
464typedef struct _ScsiCfgData {
465 u32 PortFlags;
466 int *nvram; /* table of device NVRAM values */
467 IOCPage2_t *pIocPg2; /* table of Raid Volumes */
468 IOCPage3_t *pIocPg3; /* table of physical disks */
469 IOCPage4_t *pIocPg4; /* SEP devices addressing */
470 dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */
471 int IocPg4Sz; /* IOCPage4 size */
472 u8 dvStatus[MPT_MAX_SCSI_DEVICES];
473 int isRaid; /* bit field, 1 if RAID */
474 u8 minSyncFactor; /* 0xFF if async */
475 u8 maxSyncOffset; /* 0 if async */
476 u8 maxBusWidth; /* 0 if narrow, 1 if wide */
477 u8 busType; /* SE, LVD, HD */
478 u8 sdp1version; /* SDP1 version */
479 u8 sdp1length; /* SDP1 length */
480 u8 sdp0version; /* SDP0 version */
481 u8 sdp0length; /* SDP0 length */
482 u8 dvScheduled; /* 1 if scheduled */
483 u8 forceDv; /* 1 to force DV scheduling */
484 u8 noQas; /* Disable QAS for this adapter */
485 u8 Saf_Te; /* 1 to force all Processors as SAF-TE if Inquiry data length is too short to check for SAF-TE */
486 u8 rsvd[1];
487} ScsiCfgData;
488
489/*
490 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
491 */
492typedef struct _MPT_ADAPTER
493{
494 int id; /* Unique adapter id N {0,1,2,...} */
495 int pci_irq; /* This irq */
496 char name[MPT_NAME_LENGTH]; /* "iocN" */
497 char *prod_name; /* "LSIFC9x9" */
498 SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */
499 SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */
500 u8 bus_type;
501 u32 mem_phys; /* == f4020000 (mmap) */
502 u32 pio_mem_phys; /* Programmed IO (downloadboot) */
503 int mem_size; /* mmap memory size */
504 int alloc_total;
505 u32 last_state;
506 int active;
507 u8 *alloc; /* frames alloc ptr */
508 dma_addr_t alloc_dma;
509 u32 alloc_sz;
510 MPT_FRAME_HDR *reply_frames; /* Reply msg frames - rounded up! */
511 u32 reply_frames_low_dma;
512 int reply_depth; /* Num Allocated reply frames */
513 int reply_sz; /* Reply frame size */
514 int num_chain; /* Number of chain buffers */
515 /* Pool of buffers for chaining. ReqToChain
516 * and ChainToChain track index of chain buffers.
517 * ChainBuffer (DMA) virt/phys addresses.
518 * FreeChainQ (lock) locking mechanisms.
519 */
520 int *ReqToChain;
521 int *RequestNB;
522 int *ChainToChain;
523 u8 *ChainBuffer;
524 dma_addr_t ChainBufferDMA;
525 struct list_head FreeChainQ;
526 spinlock_t FreeChainQlock;
527 /* We (host driver) get to manage our own RequestQueue! */
528 dma_addr_t req_frames_dma;
529 MPT_FRAME_HDR *req_frames; /* Request msg frames - rounded up! */
530 u32 req_frames_low_dma;
531 int req_depth; /* Number of request frames */
532 int req_sz; /* Request frame size (bytes) */
533 spinlock_t FreeQlock;
534 struct list_head FreeQ;
535 /* Pool of SCSI sense buffers for commands coming from
536 * the SCSI mid-layer. We have one 256 byte sense buffer
537 * for each REQ entry.
538 */
539 u8 *sense_buf_pool;
540 dma_addr_t sense_buf_pool_dma;
541 u32 sense_buf_low_dma;
542 int mtrr_reg;
543 struct pci_dev *pcidev; /* struct pci_dev pointer */
544 u8 __iomem *memmap; /* mmap address */
545 struct Scsi_Host *sh; /* Scsi Host pointer */
546 ScsiCfgData spi_data; /* Scsi config. data */
547 MPT_IOCTL *ioctl; /* ioctl data pointer */
548 struct proc_dir_entry *ioc_dentry;
549 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
550 spinlock_t diagLock; /* diagnostic reset lock */
551 int diagPending;
552 u32 biosVersion; /* BIOS version from IO Unit Page 2 */
553 int eventTypes; /* Event logging parameters */
554 int eventContext; /* Next event context */
555 int eventLogSize; /* Max number of cached events */
556 struct _mpt_ioctl_events *events; /* pointer to event log */
557 u8 *cached_fw; /* Pointer to FW */
558 dma_addr_t cached_fw_dma;
559 struct list_head configQ; /* linked list of config. requests */
560 int hs_reply_idx;
561#ifndef MFCNT
562 u32 pad0;
563#else
564 u32 mfcnt;
565#endif
566 u32 NB_for_64_byte_frame;
567 u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)];
568 u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)];
569 IOCFactsReply_t facts;
570 PortFactsReply_t pfacts[2];
571 FCPortPage0_t fc_port_page0[2];
572 LANPage0_t lan_cnfg_page0;
573 LANPage1_t lan_cnfg_page1;
574 u8 FirstWhoInit;
575 u8 upload_fw; /* If set, do a fw upload */
576 u8 reload_fw; /* Force a FW Reload on next reset */
577 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
578 u8 pad1[4];
579 struct list_head list;
580 struct net_device *netdev;
581} MPT_ADAPTER;
582
583/*
584 * New return value convention:
585 * 1 = Ok to free associated request frame
586 * 0 = not Ok ...
587 */
588typedef int (*MPT_CALLBACK)(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
589typedef int (*MPT_EVHANDLER)(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply);
590typedef int (*MPT_RESETHANDLER)(MPT_ADAPTER *ioc, int reset_phase);
591/* reset_phase defs */
592#define MPT_IOC_PRE_RESET 0
593#define MPT_IOC_POST_RESET 1
594#define MPT_IOC_SETUP_RESET 2
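/*
 * Illustrative sketch (not part of the original header): a registered
 * MPT_RESETHANDLER typically switches on reset_phase, e.g.
 *
 *	static int my_reset_handler(MPT_ADAPTER *ioc, int reset_phase)
 *	{
 *		switch (reset_phase) {
 *		case MPT_IOC_SETUP_RESET:	// mark per-IOC state as in-reset
 *		case MPT_IOC_PRE_RESET:		// flush outstanding work
 *		case MPT_IOC_POST_RESET:	// re-initialize per-IOC state
 *		default:
 *			break;
 *		}
 *		return 1;
 *	}
 *
 * See mptctl_ioc_reset() in mptctl.c for a real handler of this shape.
 */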
595
596/*
597 * Invent MPT host event (super-set of MPI Events)
598 * Fitted to 1030's 64-byte [max] request frame size
599 */
600typedef struct _MPT_HOST_EVENT {
601 EventNotificationReply_t MpiEvent; /* 8 32-bit words! */
602 u32 pad[6];
603 void *next;
604} MPT_HOST_EVENT;
605
606#define MPT_HOSTEVENT_IOC_BRINGUP 0x91
607#define MPT_HOSTEVENT_IOC_RECOVER 0x92
608
609/* Define the generic types based on the size
610 * of the dma_addr_t type.
611 */
612typedef struct _mpt_sge {
613 u32 FlagsLength;
614 dma_addr_t Address;
615} MptSge_t;
616
617#define mpt_addr_size() \
618 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
619 MPI_SGE_FLAGS_32_BIT_ADDRESSING)
620
621#define mpt_msg_flags() \
622 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
623 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32)
624
625/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
626/*
627 * Funky (private) macros...
628 */
629#ifdef MPT_DEBUG
630#define dprintk(x) printk x
631#else
632#define dprintk(x)
633#endif
634
635#ifdef MPT_DEBUG_INIT
636#define dinitprintk(x) printk x
637#define DBG_DUMP_FW_REQUEST_FRAME(mfp) \
638 { int i, n = 10; \
639 u32 *m = (u32 *)(mfp); \
640 printk(KERN_INFO " "); \
641 for (i=0; i<n; i++) \
642 printk(" %08x", le32_to_cpu(m[i])); \
643 printk("\n"); \
644 }
645#else
646#define dinitprintk(x)
647#define DBG_DUMP_FW_REQUEST_FRAME(mfp)
648#endif
649
650#ifdef MPT_DEBUG_EXIT
651#define dexitprintk(x) printk x
652#else
653#define dexitprintk(x)
654#endif
655
656#if defined(MPT_DEBUG_FAIL) || defined(MPT_DEBUG_SG)
657#define dfailprintk(x) printk x
658#else
659#define dfailprintk(x)
660#endif
661
662#ifdef MPT_DEBUG_HANDSHAKE
663#define dhsprintk(x) printk x
664#else
665#define dhsprintk(x)
666#endif
667
668#ifdef MPT_DEBUG_EVENTS
669#define devtprintk(x) printk x
670#else
671#define devtprintk(x)
672#endif
673
674#ifdef MPT_DEBUG_RESET
675#define drsprintk(x) printk x
676#else
677#define drsprintk(x)
678#endif
679
680//#if defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME)
681#if defined(MPT_DEBUG_MSG_FRAME)
682#define dmfprintk(x) printk x
683#define DBG_DUMP_REQUEST_FRAME(mfp) \
684 { int i, n = 24; \
685 u32 *m = (u32 *)(mfp); \
686 for (i=0; i<n; i++) { \
687 if (i && ((i%8)==0)) \
688 printk("\n"); \
689 printk("%08x ", le32_to_cpu(m[i])); \
690 } \
691 printk("\n"); \
692 }
693#else
694#define dmfprintk(x)
695#define DBG_DUMP_REQUEST_FRAME(mfp)
696#endif
697
698#ifdef MPT_DEBUG_IRQ
699#define dirqprintk(x) printk x
700#else
701#define dirqprintk(x)
702#endif
703
704#ifdef MPT_DEBUG_SG
705#define dsgprintk(x) printk x
706#else
707#define dsgprintk(x)
708#endif
709
710#if defined(MPT_DEBUG_DL) || defined(MPT_DEBUG)
711#define ddlprintk(x) printk x
712#else
713#define ddlprintk(x)
714#endif
715
716#ifdef MPT_DEBUG_DV
717#define ddvprintk(x) printk x
718#else
719#define ddvprintk(x)
720#endif
721
722#ifdef MPT_DEBUG_NEGO
723#define dnegoprintk(x) printk x
724#else
725#define dnegoprintk(x)
726#endif
727
728#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
729#define ddvtprintk(x) printk x
730#else
731#define ddvtprintk(x)
732#endif
733
734#ifdef MPT_DEBUG_IOCTL
735#define dctlprintk(x) printk x
736#else
737#define dctlprintk(x)
738#endif
739
740#ifdef MPT_DEBUG_REPLY
741#define dreplyprintk(x) printk x
742#else
743#define dreplyprintk(x)
744#endif
745
746#ifdef MPT_DEBUG_TM
747#define dtmprintk(x) printk x
748#define DBG_DUMP_TM_REQUEST_FRAME(mfp) \
749 { u32 *m = (u32 *)(mfp); \
750 int i, n = 13; \
751 printk("TM_REQUEST:\n"); \
752 for (i=0; i<n; i++) { \
753 if (i && ((i%8)==0)) \
754 printk("\n"); \
755 printk("%08x ", le32_to_cpu(m[i])); \
756 } \
757 printk("\n"); \
758 }
759#define DBG_DUMP_TM_REPLY_FRAME(mfp) \
760 { u32 *m = (u32 *)(mfp); \
761 int i, n = (le32_to_cpu(m[0]) & 0x00FF0000) >> 16; \
762 printk("TM_REPLY MessageLength=%d:\n", n); \
763 for (i=0; i<n; i++) { \
764 if (i && ((i%8)==0)) \
765 printk("\n"); \
766 printk(" %08x", le32_to_cpu(m[i])); \
767 } \
768 printk("\n"); \
769 }
770#else
771#define dtmprintk(x)
772#define DBG_DUMP_TM_REQUEST_FRAME(mfp)
773#define DBG_DUMP_TM_REPLY_FRAME(mfp)
774#endif
775
776#ifdef MPT_DEBUG_NEH
777#define nehprintk(x) printk x
778#else
779#define nehprintk(x)
780#endif
781
782#if defined(MPT_DEBUG_CONFIG) || defined(MPT_DEBUG)
783#define dcprintk(x) printk x
784#else
785#define dcprintk(x)
786#endif
787
788#if defined(MPT_DEBUG_SCSI) || defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME)
789#define dsprintk(x) printk x
790#else
791#define dsprintk(x)
792#endif
793
794
795#define MPT_INDEX_2_MFPTR(ioc,idx) \
796 (MPT_FRAME_HDR*)( (u8*)(ioc)->req_frames + (ioc)->req_sz * (idx) )
797
798#define MFPTR_2_MPT_INDEX(ioc,mf) \
799 (int)( ((u8*)mf - (u8*)(ioc)->req_frames) / (ioc)->req_sz )
800
801#define MPT_INDEX_2_RFPTR(ioc,idx) \
802 (MPT_FRAME_HDR*)( (u8*)(ioc)->reply_frames + (ioc)->req_sz * (idx) )
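/*
 * Worked example (illustrative, not taken from the original source): with
 * req_sz == 128 bytes, request frame index 3 resolves to
 * req_frames + 128 * 3, and MFPTR_2_MPT_INDEX() inverts that by dividing
 * the byte offset back down by req_sz.
 */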
803
804#if defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME)
805#define DBG_DUMP_REPLY_FRAME(mfp) \
806 { u32 *m = (u32 *)(mfp); \
807 int i, n = (le32_to_cpu(m[0]) & 0x00FF0000) >> 16; \
808 printk(KERN_INFO " "); \
809 for (i=0; i<n; i++) \
810 printk(" %08x", le32_to_cpu(m[i])); \
811 printk("\n"); \
812 }
813#define DBG_DUMP_REQUEST_FRAME_HDR(mfp) \
814 { int i, n = 3; \
815 u32 *m = (u32 *)(mfp); \
816 printk(KERN_INFO " "); \
817 for (i=0; i<n; i++) \
818 printk(" %08x", le32_to_cpu(m[i])); \
819 printk("\n"); \
820 }
821#else
822#define DBG_DUMP_REPLY_FRAME(mfp)
823#define DBG_DUMP_REQUEST_FRAME_HDR(mfp)
824#endif
825
826
827/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
828
829#define SCSI_STD_SENSE_BYTES 18
830#define SCSI_STD_INQUIRY_BYTES 36
831#define SCSI_MAX_INQUIRY_BYTES 96
832
833/*
834 * MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers
835 * Private to the driver.
836 */
837/* LOCAL structure and fields used when processing
838 * internally generated commands. These include:
839 * bus scan, dv and config requests.
840 */
841typedef struct _MPT_LOCAL_REPLY {
842 ConfigPageHeader_t header;
843 int completion;
844 u8 sense[SCSI_STD_SENSE_BYTES];
845 u8 scsiStatus;
846 u8 skip;
847 u32 pad;
848} MPT_LOCAL_REPLY;
849
850#define MPT_HOST_BUS_UNKNOWN (0xFF)
851#define MPT_HOST_TOO_MANY_TM (0x05)
852#define MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
853#define MPT_HOST_NO_CHAIN (0xFFFFFFFF)
854#define MPT_NVRAM_MASK_TIMEOUT (0x000000FF)
855#define MPT_NVRAM_SYNC_MASK (0x0000FF00)
856#define MPT_NVRAM_SYNC_SHIFT (8)
857#define MPT_NVRAM_DISCONNECT_ENABLE (0x00010000)
858#define MPT_NVRAM_ID_SCAN_ENABLE (0x00020000)
859#define MPT_NVRAM_LUN_SCAN_ENABLE (0x00040000)
860#define MPT_NVRAM_TAG_QUEUE_ENABLE (0x00080000)
861#define MPT_NVRAM_WIDE_DISABLE (0x00100000)
862#define MPT_NVRAM_BOOT_CHOICE (0x00200000)
863
864/* The TM_STATE variable is used to provide strict single threading of TM
865 * requests as well as communicate TM error conditions.
866 */
867#define TM_STATE_NONE (0)
868#define TM_STATE_IN_PROGRESS (1)
869#define TM_STATE_ERROR (2)
870
871typedef enum {
872 FC,
873 SCSI,
874 SAS
875} BUS_TYPE;
876
877typedef struct _MPT_SCSI_HOST {
878 MPT_ADAPTER *ioc;
879 int port;
880 u32 pad0;
881 struct scsi_cmnd **ScsiLookup;
882 VirtDevice **Targets;
883 MPT_LOCAL_REPLY *pLocal; /* used for internal commands */
884 struct timer_list timer;
885 /* Pool of memory for holding SCpnts before doing
886 * OS callbacks. freeQ is the free pool.
887 */
888 u8 tmPending;
889 u8 resetPending;
890 u8 negoNvram; /* DV disabled, nego NVRAM */
891 u8 pad1;
892 u8 tmState;
893 u8 rsvd[2];
894 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */
895 struct scsi_cmnd *abortSCpnt;
896 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */
897 unsigned long hard_resets; /* driver forced bus resets count */
898 unsigned long soft_resets; /* fw/external bus resets count */
899 unsigned long timeouts; /* cmd timeouts */
900 ushort sel_timeout[MPT_MAX_FC_DEVICES];
901} MPT_SCSI_HOST;
902
903/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
904/*
905 * More Dynamic Multi-Pathing stuff...
906 */
907
908/* Forward decl, a strange C thing, to prevent gcc compiler warnings */
909struct scsi_cmnd;
910
911/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
912/*
913 * Generic structure passed to the base mpt_config function.
914 */
915typedef struct _x_config_parms {
916 struct list_head linkage; /* linked list */
917 struct timer_list timer; /* timer function for this request */
918 ConfigPageHeader_t *hdr;
919 dma_addr_t physAddr;
920 int wait_done; /* wait for this request */
921 u32 pageAddr; /* properly formatted */
922 u8 action;
923 u8 dir;
924 u8 timeout; /* seconds */
925 u8 pad1;
926 u16 status;
927 u16 pad2;
928} CONFIGPARMS;
929
930/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
931/*
932 * Public entry points...
933 */
934extern int mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass);
935extern void mpt_deregister(int cb_idx);
936extern int mpt_event_register(int cb_idx, MPT_EVHANDLER ev_cbfunc);
937extern void mpt_event_deregister(int cb_idx);
938extern int mpt_reset_register(int cb_idx, MPT_RESETHANDLER reset_func);
939extern void mpt_reset_deregister(int cb_idx);
940extern int mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx);
941extern void mpt_device_driver_deregister(int cb_idx);
942extern MPT_FRAME_HDR *mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc);
943extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
944extern void mpt_put_msg_frame(int handle, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
945extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
946
947extern int mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
948extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
949extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
950extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
951extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
952extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
953extern int mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
954extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
955extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
956extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
957extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
958
959/*
960 * Public data decl's...
961 */
962extern struct list_head ioc_list;
963extern struct proc_dir_entry *mpt_proc_root_dir;
964
965extern int mpt_lan_index; /* needed by mptlan.c */
966extern int mpt_stm_index; /* needed by mptstm.c */
967
968/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
969#endif /* } __KERNEL__ */
970
971#if defined(__alpha__) || defined(__sparc_v9__) || defined(__ia64__) || defined(__x86_64__)
972#define CAST_U32_TO_PTR(x)	((void *)(u64)(x))
973#define CAST_PTR_TO_U32(x)	((u32)(u64)(x))
974#else
975#define CAST_U32_TO_PTR(x)	((void *)(x))
976#define CAST_PTR_TO_U32(x)	((u32)(x))
977#endif
978
979#define MPT_PROTOCOL_FLAGS_c_c_c_c(pflags) \
980 ((pflags) & MPI_PORTFACTS_PROTOCOL_INITIATOR) ? 'I' : 'i', \
981 ((pflags) & MPI_PORTFACTS_PROTOCOL_TARGET) ? 'T' : 't', \
982 ((pflags) & MPI_PORTFACTS_PROTOCOL_LAN) ? 'L' : 'l', \
983 ((pflags) & MPI_PORTFACTS_PROTOCOL_LOGBUSADDR) ? 'B' : 'b'
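/*
 * Illustrative usage (a sketch, not taken from this header): the macro
 * expands to four comma-separated characters, so it drops straight into a
 * printk format, e.g.
 *	printk("Capabilities={%c%c%c%c}\n",
 *	       MPT_PROTOCOL_FLAGS_c_c_c_c(pfacts->ProtocolFlags));
 * printing upper-case letters for the protocols the port supports.
 */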
984
985/*
986 * Shifted SGE Defines - Use in SGE with FlagsLength member.
987 * Otherwise, use MPI_xxx defines (refer to "lsi/mpi.h" header).
988 * Defaults: 32 bit SGE, SYSTEM_ADDRESS if direction bit is 0, read
989 */
990#define MPT_TRANSFER_IOC_TO_HOST (0x00000000)
991#define MPT_TRANSFER_HOST_TO_IOC (0x04000000)
992#define MPT_SGE_FLAGS_LAST_ELEMENT (0x80000000)
993#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000)
994#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000)
995#define MPT_SGE_FLAGS_DIRECTION (0x04000000)
996#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
997#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000)
998
999#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000)
1000#define MPT_SGE_FLAGS_SIMPLE_ELEMENT (0x10000000)
1001#define MPT_SGE_FLAGS_CHAIN_ELEMENT (0x30000000)
1002#define MPT_SGE_FLAGS_ELEMENT_MASK (0x30000000)
1003
1004#define MPT_SGE_FLAGS_SSIMPLE_READ \
1005 (MPT_SGE_FLAGS_LAST_ELEMENT | \
1006 MPT_SGE_FLAGS_END_OF_BUFFER | \
1007 MPT_SGE_FLAGS_END_OF_LIST | \
1008 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
1009 MPT_SGE_FLAGS_ADDRESSING | \
1010 MPT_TRANSFER_IOC_TO_HOST)
1011#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
1012 (MPT_SGE_FLAGS_LAST_ELEMENT | \
1013 MPT_SGE_FLAGS_END_OF_BUFFER | \
1014 MPT_SGE_FLAGS_END_OF_LIST | \
1015 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
1016 MPT_SGE_FLAGS_ADDRESSING | \
1017 MPT_TRANSFER_HOST_TO_IOC)
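/*
 * Worked example (illustrative, assuming the usual MPI encodings of
 * MPI_SGE_FLAGS_SHIFT == 24 and 64-bit addressing == 0x02): with a 64-bit
 * dma_addr_t,
 *	MPT_SGE_FLAGS_SSIMPLE_READ == 0x80000000 | 0x40000000 | 0x01000000 |
 *		0x10000000 | 0x02000000 | 0x00000000 == 0xD3000000,
 * which callers then OR with the transfer length in the low 24 bits of
 * FlagsLength.
 */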
1018
1019/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1020#endif
1021
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
new file mode 100644
index 000000000000..70b0cfb5ac5c
--- /dev/null
+++ b/drivers/message/fusion/mptctl.c
@@ -0,0 +1,2878 @@
1/*
2 * linux/drivers/message/fusion/mptctl.c
3 * Fusion MPT misc device (ioctl) driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7 *
8 * Credits:
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
11 *
12 * A special thanks to Pamela Delaney (LSI Logic) for tons of work
13 * and countless enhancements while adding support for the 1030
14 * chip family. Pam has been instrumental in the development of
15 * the 2.xx.xx series fusion drivers, and her contributions are
16 * far too numerous to hope to list in one place.
17 *
18 * A huge debt of gratitude is owed to David S. Miller (DaveM)
19 * for fixing much of the stupid and broken stuff in the early
20 * driver while porting to sparc64 platform. THANK YOU!
21 *
22 * A big THANKS to Eddie C. Dost for fixing the ioctl path
23 * and most importantly f/w download on sparc64 platform!
24 * (plus Eddie's other helpful hints and insights)
25 *
26 * Thanks to Arnaldo Carvalho de Melo for finding and patching
27 * a potential memory leak in mptctl_do_fw_download(),
28 * and for some kmalloc insight:-)
29 *
30 * (see also mptbase.c)
31 *
32 * Copyright (c) 1999-2004 LSI Logic Corporation
33 * Originally By: Steven J. Ralston, Noah Romer
34 * (mailto:sjralston1@netscape.net)
35 * (mailto:mpt_linux_developer@lsil.com)
36 *
37 * $Id: mptctl.c,v 1.63 2002/12/03 21:26:33 pdelaney Exp $
38 */
39/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
40/*
41 This program is free software; you can redistribute it and/or modify
42 it under the terms of the GNU General Public License as published by
43 the Free Software Foundation; version 2 of the License.
44
45 This program is distributed in the hope that it will be useful,
46 but WITHOUT ANY WARRANTY; without even the implied warranty of
47 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
48 GNU General Public License for more details.
49
50 NO WARRANTY
51 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
52 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
53 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
54 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
55 solely responsible for determining the appropriateness of using and
56 distributing the Program and assumes all risks associated with its
57 exercise of rights under this Agreement, including but not limited to
58 the risks and costs of program errors, damage to or loss of data,
59 programs or equipment, and unavailability or interruption of operations.
60
61 DISCLAIMER OF LIABILITY
62 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
63 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
65 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
66 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
67 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
68 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
69
70 You should have received a copy of the GNU General Public License
71 along with this program; if not, write to the Free Software
72 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
73*/
74/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
75
76#include <linux/version.h>
77#include <linux/kernel.h>
78#include <linux/module.h>
79#include <linux/errno.h>
80#include <linux/init.h>
81#include <linux/slab.h>
82#include <linux/types.h>
83#include <linux/pci.h>
84#include <linux/delay.h> /* for mdelay */
85#include <linux/miscdevice.h>
86#include <linux/smp_lock.h>
87#include <linux/compat.h>
88
89#include <asm/io.h>
90#include <asm/uaccess.h>
91
92#include <scsi/scsi.h>
93#include <scsi/scsi_cmnd.h>
94#include <scsi/scsi_device.h>
95#include <scsi/scsi_host.h>
96#include <scsi/scsi_tcq.h>
97
98#define COPYRIGHT "Copyright (c) 1999-2004 LSI Logic Corporation"
99#define MODULEAUTHOR "Steven J. Ralston, Noah Romer, Pamela Delaney"
100#include "mptbase.h"
101#include "mptctl.h"
102
103/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
104#define my_NAME "Fusion MPT misc device (ioctl) driver"
105#define my_VERSION MPT_LINUX_VERSION_COMMON
106#define MYNAM "mptctl"
107
108MODULE_AUTHOR(MODULEAUTHOR);
109MODULE_DESCRIPTION(my_NAME);
110MODULE_LICENSE("GPL");
111
112/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
113
114static int mptctl_id = -1;
115
116static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
117
118/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
119
120struct buflist {
121 u8 *kptr;
122 int len;
123};
124
125/*
126 * Function prototypes. Called from OS entry point mptctl_ioctl.
127 * arg contents specific to function.
128 */
129static int mptctl_fw_download(unsigned long arg);
130static int mptctl_getiocinfo (unsigned long arg, unsigned int cmd);
131static int mptctl_gettargetinfo (unsigned long arg);
132static int mptctl_readtest (unsigned long arg);
133static int mptctl_mpt_command (unsigned long arg);
134static int mptctl_eventquery (unsigned long arg);
135static int mptctl_eventenable (unsigned long arg);
136static int mptctl_eventreport (unsigned long arg);
137static int mptctl_replace_fw (unsigned long arg);
138
139static int mptctl_do_reset(unsigned long arg);
140static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
141static int mptctl_hp_targetinfo(unsigned long arg);
142
143static int mptctl_probe(struct pci_dev *, const struct pci_device_id *);
144static void mptctl_remove(struct pci_dev *);
145
146#ifdef CONFIG_COMPAT
147static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
148#endif
149/*
150 * Private function calls.
151 */
152static int mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr);
153static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
154static MptSge_t *kbuf_alloc_2_sgl( int bytes, u32 dir, int sge_offset, int *frags,
155 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
156static void kfree_sgl( MptSge_t *sgl, dma_addr_t sgl_dma,
157 struct buflist *buflist, MPT_ADAPTER *ioc);
158static void mptctl_timeout_expired (MPT_IOCTL *ioctl);
159static int mptctl_bus_reset(MPT_IOCTL *ioctl);
160static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd);
161static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
162
163/*
164 * Reset Handler cleanup function
165 */
166static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
167
168/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
169/*
170 * Scatter gather list (SGL) sizes and limits...
171 */
172//#define MAX_SCSI_FRAGS 9
173#define MAX_FRAGS_SPILL1 9
174#define MAX_FRAGS_SPILL2 15
175#define FRAGS_PER_BUCKET (MAX_FRAGS_SPILL2 + 1)
176
177//#define MAX_CHAIN_FRAGS 64
178//#define MAX_CHAIN_FRAGS (15+15+15+16)
179#define MAX_CHAIN_FRAGS (4 * MAX_FRAGS_SPILL2 + 1)
180
181// Define max sg LIST bytes ( == (#frags + #chains) * 8 bytes each)
182// Works out to: 592d bytes! (9+1)*8 + 4*(15+1)*8
183// ^----------------- 80 + 512
184#define MAX_SGL_BYTES ((MAX_FRAGS_SPILL1 + 1 + (4 * FRAGS_PER_BUCKET)) * 8)
185
186/* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */
187#define MAX_KMALLOC_SZ (128*1024)
188
189#define MPT_IOCTL_DEFAULT_TIMEOUT 10 /* Default timeout value (seconds) */
190
191/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
192/**
193 * mptctl_syscall_down - Down the MPT adapter syscall semaphore.
194 * @ioc: Pointer to MPT adapter
195 * @nonblock: boolean, non-zero if O_NONBLOCK is set
196 *
197 * All of the ioctl commands can potentially sleep, which is illegal
198 * with a spinlock held, thus we perform mutual exclusion here.
199 *
200 * Returns negative errno on error, or zero for success.
201 */
202static inline int
203mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
204{
205 int rc = 0;
206 dctlprintk((KERN_INFO MYNAM "::mptctl_syscall_down(%p,%d) called\n", ioc, nonblock));
207
208 if (nonblock) {
209 if (down_trylock(&ioc->ioctl->sem_ioc))
210 rc = -EAGAIN;
211 } else {
212 if (down_interruptible(&ioc->ioctl->sem_ioc))
213 rc = -ERESTARTSYS;
214 }
215 dctlprintk((KERN_INFO MYNAM "::mptctl_syscall_down return %d\n", rc));
216 return rc;
217}
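/*
 * Illustrative call pattern (a sketch; the real caller is __mptctl_ioctl()
 * further down): every sleeping ioctl path brackets its work with this
 * semaphore, e.g.
 *	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
 *		return ret;
 *	... issue the command, wait for mptctl_reply() ...
 *	up(&iocp->ioctl->sem_ioc);
 */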
218
219/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
220/*
221 * This is the callback for any message we have posted. The message itself
222 * will be returned to the message pool when we return from the IRQ
223 *
224 * This runs in irq context so be short and sweet.
225 */
226static int
227mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
228{
229 char *sense_data;
230 int sz, req_index;
231 u16 iocStatus;
232 u8 cmd;
233
234 dctlprintk(("mptctl_reply()!\n"));
235 if (req)
236 cmd = req->u.hdr.Function;
237 else
238 return 1;
239
240 if (ioc->ioctl) {
241
242 if (reply==NULL) {
243
244 dctlprintk(("mptctl_reply() NULL Reply "
245 "Function=%x!\n", cmd));
246
247 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
248 ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
249
250 /* We are done, issue wake up
251 */
252 ioc->ioctl->wait_done = 1;
253 wake_up (&mptctl_wait);
254 return 1;
255
256 }
257
258 dctlprintk(("mptctl_reply() with req=%p "
259 "reply=%p Function=%x!\n", req, reply, cmd));
260
261		/* Copy the reply frame (which must exist
262 * for non-SCSI I/O) to the IOC structure.
263 */
264 dctlprintk(("Copying Reply Frame @%p to ioc%d!\n",
265 reply, ioc->id));
266 memcpy(ioc->ioctl->ReplyFrame, reply,
267 min(ioc->reply_sz, 4*reply->u.reply.MsgLength));
268 ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID;
269
270 /* Set the command status to GOOD if IOC Status is GOOD
271 * OR if SCSI I/O cmd and data underrun or recovered error.
272 */
273 iocStatus = reply->u.reply.IOCStatus & MPI_IOCSTATUS_MASK;
274 if (iocStatus == MPI_IOCSTATUS_SUCCESS)
275 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
276
277 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
278 (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
279 ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
280
281 if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) ||
282 (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) {
283 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
284 }
285 }
286
287 /* Copy the sense data - if present
288 */
289 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) &&
290 (reply->u.sreply.SCSIState &
291 MPI_SCSI_STATE_AUTOSENSE_VALID)){
292 sz = req->u.scsireq.SenseBufferLength;
293 req_index =
294 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
295 sense_data =
296 ((u8 *)ioc->sense_buf_pool +
297 (req_index * MPT_SENSE_BUFFER_ALLOC));
298 memcpy(ioc->ioctl->sense, sense_data, sz);
299 ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID;
300 }
301
302 if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT)
303 mptctl_free_tm_flags(ioc);
304
305 /* We are done, issue wake up
306 */
307 ioc->ioctl->wait_done = 1;
308 wake_up (&mptctl_wait);
309 }
310 return 1;
311}
312
313/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
314/* mptctl_timeout_expired
315 *
316 * Expecting an interrupt, however timed out.
317 *
318 */
319static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
320{
321 int rc = 1;
322
323	if (ioctl == NULL)
324		return;
325	dctlprintk((KERN_NOTICE MYNAM ": Timeout Expired! Host %d\n",
326			ioctl->ioc->id));
327
328 ioctl->wait_done = 0;
329 if (ioctl->reset & MPTCTL_RESET_OK)
330 rc = mptctl_bus_reset(ioctl);
331
332 if (rc) {
333 /* Issue a reset for this device.
334 * The IOC is not responding.
335 */
336 dctlprintk((MYIOC_s_INFO_FMT "Calling HardReset! \n",
337 ioctl->ioc->name));
338 mpt_HardResetHandler(ioctl->ioc, NO_SLEEP);
339 }
340 return;
341
342}
343
344/* mptctl_bus_reset
345 *
346 * Bus reset code.
347 *
348 */
349static int mptctl_bus_reset(MPT_IOCTL *ioctl)
350{
351 MPT_FRAME_HDR *mf;
352 SCSITaskMgmt_t *pScsiTm;
353 MPT_SCSI_HOST *hd;
354 int ii;
355 int retval;
356
357
358 ioctl->reset &= ~MPTCTL_RESET_OK;
359
360 if (ioctl->ioc->sh == NULL)
361 return -EPERM;
362
363 hd = (MPT_SCSI_HOST *) ioctl->ioc->sh->hostdata;
364 if (hd == NULL)
365 return -EPERM;
366
367 /* Single threading ....
368 */
369 if (mptctl_set_tm_flags(hd) != 0)
370 return -EPERM;
371
372 /* Send request
373 */
374 if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) {
375 dctlprintk((MYIOC_s_WARN_FMT "IssueTaskMgmt, no msg frames!!\n",
376 ioctl->ioc->name));
377
378 mptctl_free_tm_flags(ioctl->ioc);
379 return -ENOMEM;
380 }
381
382 dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n",
383 ioctl->ioc->name, mf));
384
385 pScsiTm = (SCSITaskMgmt_t *) mf;
386 pScsiTm->TargetID = ioctl->target;
387 pScsiTm->Bus = hd->port; /* 0 */
388 pScsiTm->ChainOffset = 0;
389 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
390 pScsiTm->Reserved = 0;
391 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
392 pScsiTm->Reserved1 = 0;
393 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
394
395 for (ii= 0; ii < 8; ii++)
396 pScsiTm->LUN[ii] = 0;
397
398 for (ii=0; ii < 7; ii++)
399 pScsiTm->Reserved2[ii] = 0;
400
401 pScsiTm->TaskMsgContext = 0;
402 dtmprintk((MYIOC_s_INFO_FMT
403 "mptctl_bus_reset: issued.\n", ioctl->ioc->name));
404
405 DBG_DUMP_TM_REQUEST_FRAME((u32 *)mf);
406
407 ioctl->wait_done=0;
408 if ((retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc,
409 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP)) != 0) {
410 dfailprintk((MYIOC_s_ERR_FMT "_send_handshake FAILED!"
411 " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd,
412 hd->ioc, mf));
413 goto mptctl_bus_reset_done;
414 }
415
416 /* Now wait for the command to complete */
417 ii = wait_event_interruptible_timeout(mptctl_wait,
418 ioctl->wait_done == 1,
419 HZ*5 /* 5 second timeout */);
420
421 if(ii <=0 && (ioctl->wait_done != 1 )) {
422 ioctl->wait_done = 0;
423 retval = -1; /* return failure */
424 }
425
426mptctl_bus_reset_done:
427
428 mpt_free_msg_frame(hd->ioc, mf);
429 mptctl_free_tm_flags(ioctl->ioc);
430 return retval;
431}
432
433static int
434mptctl_set_tm_flags(MPT_SCSI_HOST *hd) {
435 unsigned long flags;
436
437 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
438
439 if (hd->tmState == TM_STATE_NONE) {
440 hd->tmState = TM_STATE_IN_PROGRESS;
441 hd->tmPending = 1;
442 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
443 } else {
444 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
445 return -EBUSY;
446 }
447
448 return 0;
449}
450
451static void
452mptctl_free_tm_flags(MPT_ADAPTER *ioc)
453{
454 MPT_SCSI_HOST * hd;
455 unsigned long flags;
456
457 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
458 if (hd == NULL)
459 return;
460
461 spin_lock_irqsave(&ioc->FreeQlock, flags);
462
463 hd->tmState = TM_STATE_NONE;
464 hd->tmPending = 0;
465 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
466
467 return;
468}
469
470/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
471/* mptctl_ioc_reset
472 *
473 * Clean-up functionality. Used only if there has been a
474 * reload of the FW due.
475 *
476 */
477static int
478mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
479{
480 MPT_IOCTL *ioctl = ioc->ioctl;
481 dctlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to IOCTL driver!\n",
482 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
483 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
484
485 if(ioctl == NULL)
486 return 1;
487
488 switch(reset_phase) {
489 case MPT_IOC_SETUP_RESET:
490 ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET;
491 break;
492 case MPT_IOC_POST_RESET:
493 ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET;
494 break;
495 case MPT_IOC_PRE_RESET:
496 default:
497 break;
498 }
499
500 return 1;
501}
502
503/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
504/*
505 * MPT ioctl handler
506 * cmd - specify the particular IOCTL command to be issued
507 * arg - data specific to the command. Must not be null.
508 */
509static long
510__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
511{
512 mpt_ioctl_header __user *uhdr = (void __user *) arg;
513 mpt_ioctl_header khdr;
514 int iocnum;
515 unsigned iocnumX;
516 int nonblock = (file->f_flags & O_NONBLOCK);
517 int ret;
518 MPT_ADAPTER *iocp = NULL;
519
520 dctlprintk(("mptctl_ioctl() called\n"));
521
522 if (copy_from_user(&khdr, uhdr, sizeof(khdr))) {
523 printk(KERN_ERR "%s::mptctl_ioctl() @%d - "
524 "Unable to copy mpt_ioctl_header data @ %p\n",
525 __FILE__, __LINE__, uhdr);
526 return -EFAULT;
527 }
528 ret = -ENXIO; /* (-6) No such device or address */
529
530 /* Verify intended MPT adapter - set iocnum and the adapter
531 * pointer (iocp)
532 */
533 iocnumX = khdr.iocnum & 0xFF;
534 if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
535 (iocp == NULL)) {
536 dctlprintk((KERN_ERR "%s::mptctl_ioctl() @%d - ioc%d not found!\n",
537 __FILE__, __LINE__, iocnumX));
538 return -ENODEV;
539 }
540
541 if (!iocp->active) {
542 printk(KERN_ERR "%s::mptctl_ioctl() @%d - Controller disabled.\n",
543 __FILE__, __LINE__);
544 return -EFAULT;
545 }
546
547 /* Handle those commands that are just returning
548 * information stored in the driver.
549 * These commands should never time out and are unaffected
550 * by TM and FW reloads.
551 */
552 if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
553 return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
554 } else if (cmd == MPTTARGETINFO) {
555 return mptctl_gettargetinfo(arg);
556 } else if (cmd == MPTTEST) {
557 return mptctl_readtest(arg);
558 } else if (cmd == MPTEVENTQUERY) {
559 return mptctl_eventquery(arg);
560 } else if (cmd == MPTEVENTENABLE) {
561 return mptctl_eventenable(arg);
562 } else if (cmd == MPTEVENTREPORT) {
563 return mptctl_eventreport(arg);
564 } else if (cmd == MPTFWREPLACE) {
565 return mptctl_replace_fw(arg);
566 }
567
568 /* All of these commands require an interrupt or
569 * are unknown/illegal.
570 */
571 if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
572 return ret;
573
574 dctlprintk((MYIOC_s_INFO_FMT ": mptctl_ioctl()\n", iocp->name));
575
576 if (cmd == MPTFWDOWNLOAD)
577 ret = mptctl_fw_download(arg);
578 else if (cmd == MPTCOMMAND)
579 ret = mptctl_mpt_command(arg);
580 else if (cmd == MPTHARDRESET)
581 ret = mptctl_do_reset(arg);
582 else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
583 ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
584 else if (cmd == HP_GETTARGETINFO)
585 ret = mptctl_hp_targetinfo(arg);
586 else
587 ret = -EINVAL;
588
589 up(&iocp->ioctl->sem_ioc);
590
591 return ret;
592}
593
594static long
595mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
596{
597 long ret;
598 lock_kernel();
599 ret = __mptctl_ioctl(file, cmd, arg);
600 unlock_kernel();
601 return ret;
602}
603
604static int mptctl_do_reset(unsigned long arg)
605{
606 struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
607 struct mpt_ioctl_diag_reset krinfo;
608 MPT_ADAPTER *iocp;
609
610 dctlprintk((KERN_INFO "mptctl_do_reset called.\n"));
611
612 if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
613 printk(KERN_ERR "%s@%d::mptctl_do_reset - "
614 "Unable to copy mpt_ioctl_diag_reset struct @ %p\n",
615 __FILE__, __LINE__, urinfo);
616 return -EFAULT;
617 }
618
619 if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
620 dctlprintk((KERN_ERR "%s@%d::mptctl_do_reset - ioc%d not found!\n",
621 __FILE__, __LINE__, krinfo.hdr.iocnum));
622 return -ENODEV; /* (-6) No such device or address */
623 }
624
625 if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) {
626 printk (KERN_ERR "%s@%d::mptctl_do_reset - reset failed.\n",
627 __FILE__, __LINE__);
628 return -1;
629 }
630
631 return 0;
632}
633
634/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
635/*
636 * MPT FW download function. Cast the arg into the mpt_fw_xfer structure.
637 * This structure contains: iocnum, firmware length (bytes),
638 * pointer to user space memory where the fw image is stored.
639 *
640 * Outputs: None.
641 * Return: 0 if successful
642 * -EFAULT if data unavailable
643 * -ENXIO if no such device
644 * -EAGAIN if resource problem
645 * -ENOMEM if no memory for SGE
646 * -EMLINK if too many chain buffers required
647 * -EBADRQC if adapter does not support FW download
648 * -EBUSY if adapter is busy
649 * -ENOMSG if FW upload returned bad status
650 */
651static int
652mptctl_fw_download(unsigned long arg)
653{
654 struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
655 struct mpt_fw_xfer kfwdl;
656
657 dctlprintk((KERN_INFO "mptctl_fwdl called. mptctl_id = %xh\n", mptctl_id)); //tc
658 if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) {
659 printk(KERN_ERR "%s@%d::_ioctl_fwdl - "
660 "Unable to copy mpt_fw_xfer struct @ %p\n",
661 __FILE__, __LINE__, ufwdl);
662 return -EFAULT;
663 }
664
665 return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
666}
667
668/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
669/*
670 * FW Download engine.
671 * Outputs: None.
672 * Return: 0 if successful
673 * -EFAULT if data unavailable
674 * -ENXIO if no such device
675 * -EAGAIN if resource problem
676 * -ENOMEM if no memory for SGE
677 * -EMLINK if too many chain buffers required
678 * -EBADRQC if adapter does not support FW download
679 * -EBUSY if adapter is busy
680 * -ENOMSG if FW upload returned bad status
681 */
682static int
683mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
684{
685 FWDownload_t *dlmsg;
686 MPT_FRAME_HDR *mf;
687 MPT_ADAPTER *iocp;
688 FWDownloadTCSGE_t *ptsge;
689 MptSge_t *sgl, *sgIn;
690 char *sgOut;
691 struct buflist *buflist;
692 struct buflist *bl;
693 dma_addr_t sgl_dma;
694 int ret;
695 int numfrags = 0;
696 int maxfrags;
697 int n = 0;
698 u32 sgdir;
699 u32 nib;
700 int fw_bytes_copied = 0;
701 int i;
702 int sge_offset = 0;
703 u16 iocstat;
704 pFWDownloadReply_t ReplyMsg = NULL;
705
706 dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id));
707
708 dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf));
709 dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen));
710 dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc));
711
712 if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) {
713 dctlprintk(("%s@%d::_ioctl_fwdl - ioc%d not found!\n",
714 __FILE__, __LINE__, ioc));
715 return -ENODEV; /* (-6) No such device or address */
716 }
717
718 /* Valid device. Get a message frame and construct the FW download message.
719 */
720 if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
721 return -EAGAIN;
722 dlmsg = (FWDownload_t*) mf;
723 ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
724 sgOut = (char *) (ptsge + 1);
725
726 /*
727 * Construct f/w download request
728 */
729 dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW;
730 dlmsg->Reserved = 0;
731 dlmsg->ChainOffset = 0;
732 dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
733 dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
734 dlmsg->MsgFlags = 0;
735
736 /* Set up the Transaction SGE.
737 */
738 ptsge->Reserved = 0;
739 ptsge->ContextSize = 0;
740 ptsge->DetailsLength = 12;
741 ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
742 ptsge->Reserved_0100_Checksum = 0;
743 ptsge->ImageOffset = 0;
744 ptsge->ImageSize = cpu_to_le32(fwlen);
745
746 /* Add the SGL
747 */
748
749 /*
750 * Need to kmalloc area(s) for holding firmware image bytes.
751 * But we need to do it piece meal, using a proper
752 * scatter gather list (with 128kB MAX hunks).
753 *
754 * A practical limit here might be # of sg hunks that fit into
755 * a single IOC request frame; 12 or 8 (see below), so:
756	 *	For FC9xx: 12 x 128kB == 1.5 MB (max)
757	 *	For C1030:  8 x 128kB ==   1 MB (max)
758 * We could support chaining, but things get ugly(ier:)
759 *
760 * Set the sge_offset to the start of the sgl (bytes).
761 */
762 sgdir = 0x04000000; /* IOC will READ from sys mem */
763 sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t);
764 if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset,
765 &numfrags, &buflist, &sgl_dma, iocp)) == NULL)
766 return -ENOMEM;
767
768 /*
769 * We should only need SGL with 2 simple_32bit entries (up to 256 kB)
770 * for FC9xx f/w image, but calculate max number of sge hunks
771 * we can fit into a request frame, and limit ourselves to that.
772 * (currently no chain support)
773 * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE
774 * Request maxfrags
775 * 128 12
776 * 96 8
777 * 64 4
778 */
779 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t))
780 / (sizeof(dma_addr_t) + sizeof(u32));
781 if (numfrags > maxfrags) {
782 ret = -EMLINK;
783 goto fwdl_out;
784 }
785
786 dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags));
787
788 /*
789 * Parse SG list, copying sgl itself,
790 * plus f/w image hunks from user space as we go...
791 */
792 ret = -EFAULT;
793 sgIn = sgl;
794 bl = buflist;
795 for (i=0; i < numfrags; i++) {
796
797 /* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE
798 * Skip everything but Simple. If simple, copy from
799 * user space into kernel space.
800 * Note: we should not have anything but Simple as
801		 * Chain SGEs are illegal.
802 */
803 nib = (sgIn->FlagsLength & 0x30000000) >> 28;
804 if (nib == 0 || nib == 3) {
805 ;
806 } else if (sgIn->Address) {
807 mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
808 n++;
809 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
810 printk(KERN_ERR "%s@%d::_ioctl_fwdl - "
811 "Unable to copy f/w buffer hunk#%d @ %p\n",
812 __FILE__, __LINE__, n, ufwbuf);
813 goto fwdl_out;
814 }
815 fw_bytes_copied += bl->len;
816 }
817 sgIn++;
818 bl++;
819 sgOut += (sizeof(dma_addr_t) + sizeof(u32));
820 }
821
822#ifdef MPT_DEBUG
823 {
824 u32 *m = (u32 *)mf;
825 printk(KERN_INFO MYNAM ": F/W download request:\n" KERN_INFO " ");
826 for (i=0; i < 7+numfrags*2; i++)
827 printk(" %08x", le32_to_cpu(m[i]));
828 printk("\n");
829 }
830#endif
831
832 /*
833 * Finally, perform firmware download.
834 */
835 iocp->ioctl->wait_done = 0;
836 mpt_put_msg_frame(mptctl_id, iocp, mf);
837
838 /* Now wait for the command to complete */
839 ret = wait_event_interruptible_timeout(mptctl_wait,
840 iocp->ioctl->wait_done == 1,
841 HZ*60);
842
843 if(ret <=0 && (iocp->ioctl->wait_done != 1 )) {
844 /* Now we need to reset the board */
845 mptctl_timeout_expired(iocp->ioctl);
846 ret = -ENODATA;
847 goto fwdl_out;
848 }
849
850 if (sgl)
851 kfree_sgl(sgl, sgl_dma, buflist, iocp);
852
853 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame;
854 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
855 if (iocstat == MPI_IOCSTATUS_SUCCESS) {
856 printk(KERN_INFO MYNAM ": F/W update successfully sent to %s!\n", iocp->name);
857 return 0;
858 } else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) {
859 printk(KERN_WARNING MYNAM ": ?Hmmm... %s says it doesn't support F/W download!?!\n",
860 iocp->name);
861 printk(KERN_WARNING MYNAM ": (time to go bang on somebodies door)\n");
862 return -EBADRQC;
863 } else if (iocstat == MPI_IOCSTATUS_BUSY) {
864 printk(KERN_WARNING MYNAM ": Warning! %s says: IOC_BUSY!\n", iocp->name);
865 printk(KERN_WARNING MYNAM ": (try again later?)\n");
866 return -EBUSY;
867 } else {
868 printk(KERN_WARNING MYNAM "::ioctl_fwdl() ERROR! %s returned [bad] status = %04xh\n",
869 iocp->name, iocstat);
870 printk(KERN_WARNING MYNAM ": (bad VooDoo)\n");
871 return -ENOMSG;
872 }
873 return 0;
874
875fwdl_out:
876 kfree_sgl(sgl, sgl_dma, buflist, iocp);
877 return ret;
878}
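/*
 * Illustrative user-space sketch of driving the MPTFWDOWNLOAD path above
 * (the device node name and error handling are assumptions, not taken from
 * this file):
 *
 *	struct mpt_fw_xfer fw = { .iocnum = 0, .fwlen = len, .bufp = image };
 *	int fd = open("/dev/mptctl", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, MPTFWDOWNLOAD, &fw) == 0)
 *		printf("firmware image sent\n");
 */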
879
880/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
881/*
882 * SGE Allocation routine
883 *
884 * Inputs: bytes - number of bytes to be transferred
885 * sgdir - data direction
886 * sge_offset - offset (in bytes) from the start of the request
887 * frame to the first SGE
888 * ioc - pointer to the mptadapter
889 * Outputs: frags - number of scatter gather elements
890 * blp - point to the buflist pointer
891 * sglbuf_dma - pointer to the (dma) sgl
892 * Returns:	NULL if it fails,
893 * pointer to the (virtual) sgl if successful.
894 */
895static MptSge_t *
896kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
897 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc)
898{
899 MptSge_t *sglbuf = NULL; /* pointer to array of SGE */
900 /* and chain buffers */
901 struct buflist *buflist = NULL; /* kernel routine */
902 MptSge_t *sgl;
903 int numfrags = 0;
904 int fragcnt = 0;
905 int alloc_sz = min(bytes,MAX_KMALLOC_SZ); // avoid kernel warning msg!
906 int bytes_allocd = 0;
907 int this_alloc;
908 dma_addr_t pa; // phys addr
909 int i, buflist_ent;
910 int sg_spill = MAX_FRAGS_SPILL1;
911 int dir;
912 /* initialization */
913 *frags = 0;
914 *blp = NULL;
915
916 /* Allocate and initialize an array of kernel
917 * structures for the SG elements.
918 */
919 i = MAX_SGL_BYTES / 8;
920 buflist = kmalloc(i, GFP_USER);
921 if (buflist == NULL)
922 return NULL;
923 memset(buflist, 0, i);
924 buflist_ent = 0;
925
926 /* Allocate a single block of memory to store the sg elements and
927 * the chain buffers. The calling routine is responsible for
928 * copying the data in this array into the correct place in the
929 * request and chain buffers.
930 */
931 sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma);
932 if (sglbuf == NULL)
933 goto free_and_fail;
934
935 if (sgdir & 0x04000000)
936 dir = PCI_DMA_TODEVICE;
937 else
938 dir = PCI_DMA_FROMDEVICE;
939
940 /* At start:
941 * sgl = sglbuf = point to beginning of sg buffer
942 * buflist_ent = 0 = first kernel structure
943 * sg_spill = number of SGE that can be written before the first
944 * chain element.
945 *
946 */
947 sgl = sglbuf;
948 sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1;
949 while (bytes_allocd < bytes) {
950 this_alloc = min(alloc_sz, bytes-bytes_allocd);
951 buflist[buflist_ent].len = this_alloc;
952 buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev,
953 this_alloc,
954 &pa);
955 if (buflist[buflist_ent].kptr == NULL) {
956 alloc_sz = alloc_sz / 2;
957 if (alloc_sz == 0) {
958 printk(KERN_WARNING MYNAM "-SG: No can do - "
959 "not enough memory! :-(\n");
960 printk(KERN_WARNING MYNAM "-SG: (freeing %d frags)\n",
961 numfrags);
962 goto free_and_fail;
963 }
964 continue;
965 } else {
966 dma_addr_t dma_addr;
967
968 bytes_allocd += this_alloc;
969 sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc);
970 dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir);
971 sgl->Address = dma_addr;
972
973 fragcnt++;
974 numfrags++;
975 sgl++;
976 buflist_ent++;
977 }
978
979 if (bytes_allocd >= bytes)
980 break;
981
982 /* Need to chain? */
983 if (fragcnt == sg_spill) {
984 printk(KERN_WARNING MYNAM "-SG: No can do - " "Chain required! :-(\n");
985 printk(KERN_WARNING MYNAM "(freeing %d frags)\n", numfrags);
986 goto free_and_fail;
987 }
988
989 /* overflow check... */
990 if (numfrags*8 > MAX_SGL_BYTES){
991 /* GRRRRR... */
992 printk(KERN_WARNING MYNAM "-SG: No can do - "
993 "too many SG frags! :-(\n");
994 printk(KERN_WARNING MYNAM "-SG: (freeing %d frags)\n",
995 numfrags);
996 goto free_and_fail;
997 }
998 }
999
1000 /* Last sge fixup: set LE+eol+eob bits */
1001 sgl[-1].FlagsLength |= 0xC1000000;
1002
1003 *frags = numfrags;
1004 *blp = buflist;
1005
1006 dctlprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - "
1007 "%d SG frags generated!\n",
1008 numfrags));
1009
1010 dctlprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - "
1011 "last (big) alloc_sz=%d\n",
1012 alloc_sz));
1013
1014 return sglbuf;
1015
1016free_and_fail:
1017 if (sglbuf != NULL) {
1018 int i;
1019
1020 for (i = 0; i < numfrags; i++) {
1021 dma_addr_t dma_addr;
1022 u8 *kptr;
1023 int len;
1024
1025 if ((sglbuf[i].FlagsLength >> 24) == 0x30)
1026 continue;
1027
1028 dma_addr = sglbuf[i].Address;
1029 kptr = buflist[i].kptr;
1030 len = buflist[i].len;
1031
1032 pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
1033 }
1034 pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma);
1035 }
1036 kfree(buflist);
1037 return NULL;
1038}
1039
1040/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1041/*
1042 * Routine to free the SGL elements.
1043 */
1044static void
1045kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc)
1046{
1047 MptSge_t *sg = sgl;
1048 struct buflist *bl = buflist;
1049 u32 nib;
1050 int dir;
1051 int n = 0;
1052
1053 if (sg->FlagsLength & 0x04000000)
1054 dir = PCI_DMA_TODEVICE;
1055 else
1056 dir = PCI_DMA_FROMDEVICE;
1057
1058 nib = (sg->FlagsLength & 0xF0000000) >> 28;
1059 while (! (nib & 0x4)) { /* eob */
1060 /* skip ignore/chain. */
1061 if (nib == 0 || nib == 3) {
1062 ;
1063 } else if (sg->Address) {
1064 dma_addr_t dma_addr;
1065 void *kptr;
1066 int len;
1067
1068 dma_addr = sg->Address;
1069 kptr = bl->kptr;
1070 len = bl->len;
1071 pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
1072 pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
1073 n++;
1074 }
1075 sg++;
1076 bl++;
1077 nib = (le32_to_cpu(sg->FlagsLength) & 0xF0000000) >> 28;
1078 }
1079
1080 /* we're at eob! */
1081 if (sg->Address) {
1082 dma_addr_t dma_addr;
1083 void *kptr;
1084 int len;
1085
1086 dma_addr = sg->Address;
1087 kptr = bl->kptr;
1088 len = bl->len;
1089 pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
1090 pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
1091 n++;
1092 }
1093
1094 pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma);
1095 kfree(buflist);
1096 dctlprintk((KERN_INFO MYNAM "-SG: Free'd 1 SGL buf + %d kbufs!\n", n));
1097}
1098
1099/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1100/*
1101 * mptctl_getiocinfo - Query the host adapter for IOC information.
1102 * @arg: User space argument
1103 *
1104 * Outputs: None.
1105 * Return: 0 if successful
1106 * -EFAULT if data unavailable
1107 * -ENODEV if no such device/adapter
1108 */
1109static int
1110mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1111{
1112 struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
1113 struct mpt_ioctl_iocinfo *karg;
1114 MPT_ADAPTER *ioc;
1115 struct pci_dev *pdev;
1116 struct Scsi_Host *sh;
1117 MPT_SCSI_HOST *hd;
1118 int iocnum;
1119 int numDevices = 0;
1120 unsigned int max_id;
1121 int ii;
1122 int port;
1123 int cim_rev;
1124 u8 revision;
1125
1126 dctlprintk((": mptctl_getiocinfo called.\n"));
1127	/* Adding the PCI info results in an unaligned access on
1128	 * IA64 and Sparc. Reset long to int. Return no PCI
1129	 * data for the obsolete format.
1130 */
1131 if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0))
1132 cim_rev = 0;
1133 else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1))
1134 cim_rev = 1;
1135 else if (data_size == sizeof(struct mpt_ioctl_iocinfo))
1136 cim_rev = 2;
1137 else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12))
1138 cim_rev = 0; /* obsolete */
1139 else
1140 return -EFAULT;
1141
1142 karg = kmalloc(data_size, GFP_KERNEL);
1143 if (karg == NULL) {
1144 printk(KERN_ERR "%s::mpt_ioctl_iocinfo() @%d - no memory available!\n",
1145 __FILE__, __LINE__);
1146 return -ENOMEM;
1147 }
1148
1149 if (copy_from_user(karg, uarg, data_size)) {
1150 printk(KERN_ERR "%s@%d::mptctl_getiocinfo - "
1151 "Unable to read in mpt_ioctl_iocinfo struct @ %p\n",
1152 __FILE__, __LINE__, uarg);
1153 kfree(karg);
1154 return -EFAULT;
1155 }
1156
1157 if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
1158 (ioc == NULL)) {
1159 dctlprintk((KERN_ERR "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
1160 __FILE__, __LINE__, iocnum));
1161 kfree(karg);
1162 return -ENODEV;
1163 }
1164
1165 /* Verify the data transfer size is correct.
1166 * Ignore the port setting.
1167 */
1168 if (karg->hdr.maxDataSize != data_size) {
1169 printk(KERN_ERR "%s@%d::mptctl_getiocinfo - "
1170 "Structure size mismatch. Command not completed.\n",
1171 __FILE__, __LINE__);
1172 kfree(karg);
1173 return -EFAULT;
1174 }
1175
1176 /* Fill in the data and return the structure to the calling
1177 * program
1178 */
1179 if (ioc->bus_type == FC)
1180 karg->adapterType = MPT_IOCTL_INTERFACE_FC;
1181 else
1182 karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
1183
1184 port = karg->hdr.port;
1185
1186 karg->port = port;
1187 pdev = (struct pci_dev *) ioc->pcidev;
1188
1189 karg->pciId = pdev->device;
1190 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1191 karg->hwRev = revision;
1192 karg->subSystemDevice = pdev->subsystem_device;
1193 karg->subSystemVendor = pdev->subsystem_vendor;
1194
1195 if (cim_rev == 1) {
1196 /* Get the PCI bus, device, and function numbers for the IOC
1197 */
1198 karg->pciInfo.u.bits.busNumber = pdev->bus->number;
1199 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
1200 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1201 } else if (cim_rev == 2) {
1202 /* Get the PCI bus, device, function and segment ID numbers
1203 for the IOC */
1204 karg->pciInfo.u.bits.busNumber = pdev->bus->number;
1205 karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
1206 karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
1208 karg->pciInfo.segmentID = pci_domain_nr(pdev->bus);
1209 }
1210
1211 /* Get number of devices
1212 */
1213 if ((sh = ioc->sh) != NULL) {
1214 /* sh->max_id = maximum target ID + 1
1215 */
1216 max_id = sh->max_id - 1;
1217 hd = (MPT_SCSI_HOST *) sh->hostdata;
1218
1219 /* Check all of the target structures and
1220 * keep a counter.
1221 */
1222 if (hd && hd->Targets) {
1223 for (ii = 0; ii <= max_id; ii++) {
1224 if (hd->Targets[ii])
1225 numDevices++;
1226 }
1227 }
1228 }
1229 karg->numDevices = numDevices;
1230
1231 /* Set the BIOS and FW Version
1232 */
1233 karg->FWVersion = ioc->facts.FWVersion.Word;
1234 karg->BIOSVersion = ioc->biosVersion;
1235
1236 /* Set the Version Strings.
1237 */
1238 strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH);
1239 karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0';
1240
1241 karg->busChangeEvent = 0;
1242 karg->hostId = ioc->pfacts[port].PortSCSIID;
1243 karg->rsvd[0] = karg->rsvd[1] = 0;
1244
1245 /* Copy the data from kernel memory to user memory
1246 */
1247 if (copy_to_user((char __user *)arg, karg, data_size)) {
1248 printk(KERN_ERR "%s@%d::mptctl_getiocinfo - "
1249 "Unable to write out mpt_ioctl_iocinfo struct @ %p\n",
1250 __FILE__, __LINE__, uarg);
1251 kfree(karg);
1252 return -EFAULT;
1253 }
1254
1255 kfree(karg);
1256 return 0;
1257}
1258
1259/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1260/*
1261 * mptctl_gettargetinfo - Query the host adapter for target information.
1262 * @arg: User space argument
1263 *
1264 * Outputs: None.
1265 * Return: 0 if successful
1266 * -EFAULT if data unavailable
1267 * -ENODEV if no such device/adapter
1268 */
1269static int
1270mptctl_gettargetinfo (unsigned long arg)
1271{
1272 struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
1273 struct mpt_ioctl_targetinfo karg;
1274 MPT_ADAPTER *ioc;
1275 struct Scsi_Host *sh;
1276 MPT_SCSI_HOST *hd;
1277 VirtDevice *vdev;
1278 char *pmem;
1279 int *pdata;
1280 IOCPage2_t *pIoc2;
1281 IOCPage3_t *pIoc3;
1282 int iocnum;
1283 int numDevices = 0;
1284 unsigned int max_id;
1285 int id, jj, indexed_lun, lun_index;
1286 u32 lun;
1287 int maxWordsLeft;
1288 int numBytes;
1289 u8 port, devType, bus_id;
1290
1291 dctlprintk(("mptctl_gettargetinfo called.\n"));
1292 if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) {
1293 printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - "
1294 "Unable to read in mpt_ioctl_targetinfo struct @ %p\n",
1295 __FILE__, __LINE__, uarg);
1296 return -EFAULT;
1297 }
1298
1299 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
1300 (ioc == NULL)) {
1301 dctlprintk((KERN_ERR "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
1302 __FILE__, __LINE__, iocnum));
1303 return -ENODEV;
1304 }
1305
1306 /* Get the port number and set the maximum number of bytes
1307 * in the returned structure.
1308 * Ignore the port setting.
1309 */
1310 numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
1311 maxWordsLeft = numBytes/sizeof(int);
1312 port = karg.hdr.port;
1313
1314 if (maxWordsLeft <= 0) {
1315 printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - no memory available!\n",
1316 __FILE__, __LINE__);
1317 return -ENOMEM;
1318 }
1319
1320 /* Fill in the data and return the structure to the calling
1321 * program
1322 */
1323
1324	/* struct mpt_ioctl_targetinfo does not reserve space for the full
1325	 * target list, and the kernel stack is too small to hold it when
1326	 * the IOCTL is called. Allocate memory, populate it, copy it back
1327	 * to the user, then free the memory.
1328 * targetInfo format:
1329 * bits 31-24: reserved
1330 * 23-16: LUN
1331 * 15- 8: Bus Number
1332 * 7- 0: Target ID
1333 */
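	/* (Illustrative example, not in the original source: under this
	 *  layout a packed word of 0x00020103 decodes as LUN 2, bus 1,
	 *  target ID 3.)
	 */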
1334 pmem = kmalloc(numBytes, GFP_KERNEL);
1335 if (pmem == NULL) {
1336 printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - no memory available!\n",
1337 __FILE__, __LINE__);
1338 return -ENOMEM;
1339 }
1340 memset(pmem, 0, numBytes);
1341 pdata = (int *) pmem;
1342
1343 /* Get number of devices
1344 */
1345 if ((sh = ioc->sh) != NULL) {
1346
1347 max_id = sh->max_id - 1;
1348 hd = (MPT_SCSI_HOST *) sh->hostdata;
1349
1350 /* Check all of the target structures.
1351 * Save the Id and increment the counter,
1352 * if ptr non-null.
1353 * sh->max_id = maximum target ID + 1
1354 */
1355 if (hd && hd->Targets) {
1356 mpt_findImVolumes(ioc);
1357 pIoc2 = ioc->spi_data.pIocPg2;
1358 for ( id = 0; id <= max_id; ) {
1359 if ( pIoc2 && pIoc2->NumActiveVolumes ) {
1360 if ( id == pIoc2->RaidVolume[0].VolumeID ) {
1361 if (maxWordsLeft <= 0) {
1362 printk(KERN_ERR "mptctl_gettargetinfo - "
1363						"buffer is full but volume is available on ioc %d, numDevices=%d\n", iocnum, numDevices);
1364 goto data_space_full;
1365 }
1366 if ( ( pIoc2->RaidVolume[0].Flags & MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE ) == 0 )
1367 devType = 0x80;
1368 else
1369 devType = 0xC0;
1370 bus_id = pIoc2->RaidVolume[0].VolumeBus;
1371 numDevices++;
1372 *pdata = ( (devType << 24) | (bus_id << 8) | id );
1373 dctlprintk((KERN_ERR "mptctl_gettargetinfo - "
1374 "volume ioc=%d target=%x numDevices=%d pdata=%p\n", iocnum, *pdata, numDevices, pdata));
1375 pdata++;
1376 --maxWordsLeft;
1377 goto next_id;
1378 } else {
1379 pIoc3 = ioc->spi_data.pIocPg3;
1380 for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) {
1381 if ( pIoc3->PhysDisk[jj].PhysDiskID == id )
1382 goto next_id;
1383 }
1384 }
1385 }
1386 if ( (vdev = hd->Targets[id]) ) {
1387 for (jj = 0; jj <= MPT_LAST_LUN; jj++) {
1388 lun_index = (jj >> 5);
1389 indexed_lun = (jj % 32);
1390 lun = (1 << indexed_lun);
1391 if (vdev->luns[lun_index] & lun) {
1392 if (maxWordsLeft <= 0) {
1393 printk(KERN_ERR "mptctl_gettargetinfo - "
1394 "buffer is full but more targets are available on ioc %d numDevices=%d\n", iocnum, numDevices);
1395 goto data_space_full;
1396 }
1397 bus_id = vdev->bus_id;
1398 numDevices++;
1399 *pdata = ( (jj << 16) | (bus_id << 8) | id );
1400 dctlprintk((KERN_ERR "mptctl_gettargetinfo - "
1401 "target ioc=%d target=%x numDevices=%d pdata=%p\n", iocnum, *pdata, numDevices, pdata));
1402 pdata++;
1403 --maxWordsLeft;
1404 }
1405 }
1406 }
1407next_id:
1408 id++;
1409 }
1410 }
1411 }
1412data_space_full:
1413 karg.numDevices = numDevices;
1414
1415 /* Copy part of the data from kernel memory to user memory
1416 */
1417 if (copy_to_user((char __user *)arg, &karg,
1418 sizeof(struct mpt_ioctl_targetinfo))) {
1419 printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - "
1420 "Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
1421 __FILE__, __LINE__, uarg);
1422 kfree(pmem);
1423 return -EFAULT;
1424 }
1425
1426 /* Copy the remaining data from kernel memory to user memory
1427 */
1428 if (copy_to_user(uarg->targetInfo, pmem, numBytes)) {
1429 printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - "
1430 "Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
1431 __FILE__, __LINE__, pdata);
1432 kfree(pmem);
1433 return -EFAULT;
1434 }
1435
1436 kfree(pmem);
1437
1438 return 0;
1439}
1440
1441/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1442/* MPT IOCTL Test function.
1443 *
1444 * Outputs: None.
1445 * Return: 0 if successful
1446 * -EFAULT if data unavailable
1447 * -ENODEV if no such device/adapter
1448 */
1449static int
1450mptctl_readtest (unsigned long arg)
1451{
1452 struct mpt_ioctl_test __user *uarg = (void __user *) arg;
1453 struct mpt_ioctl_test karg;
1454 MPT_ADAPTER *ioc;
1455 int iocnum;
1456
1457 dctlprintk(("mptctl_readtest called.\n"));
1458 if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
1459 printk(KERN_ERR "%s@%d::mptctl_readtest - "
1460 "Unable to read in mpt_ioctl_test struct @ %p\n",
1461 __FILE__, __LINE__, uarg);
1462 return -EFAULT;
1463 }
1464
1465 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
1466 (ioc == NULL)) {
1467 dctlprintk((KERN_ERR "%s::mptctl_readtest() @%d - ioc%d not found!\n",
1468 __FILE__, __LINE__, iocnum));
1469 return -ENODEV;
1470 }
1471
1472 /* Fill in the data and return the structure to the calling
1473 * program
1474 */
1475
1476#ifdef MFCNT
1477 karg.chip_type = ioc->mfcnt;
1478#else
1479 karg.chip_type = ioc->pcidev->device;
1480#endif
1481 strncpy (karg.name, ioc->name, MPT_MAX_NAME);
1482 karg.name[MPT_MAX_NAME-1]='\0';
1483 strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH);
1484 karg.product[MPT_PRODUCT_LENGTH-1]='\0';
1485
1486 /* Copy the data from kernel memory to user memory
1487 */
1488 if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_test))) {
1489 printk(KERN_ERR "%s@%d::mptctl_readtest - "
1490 "Unable to write out mpt_ioctl_test struct @ %p\n",
1491 __FILE__, __LINE__, uarg);
1492 return -EFAULT;
1493 }
1494
1495 return 0;
1496}
1497
1498/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1499/*
1500 * mptctl_eventquery - Query the host adapter for the event types
1501 * that are being logged.
1502 * @arg: User space argument
1503 *
1504 * Outputs: None.
1505 * Return: 0 if successful
1506 * -EFAULT if data unavailable
1507 * -ENODEV if no such device/adapter
1508 */
1509static int
1510mptctl_eventquery (unsigned long arg)
1511{
1512 struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
1513 struct mpt_ioctl_eventquery karg;
1514 MPT_ADAPTER *ioc;
1515 int iocnum;
1516
1517 dctlprintk(("mptctl_eventquery called.\n"));
1518 if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
1519 printk(KERN_ERR "%s@%d::mptctl_eventquery - "
1520 "Unable to read in mpt_ioctl_eventquery struct @ %p\n",
1521 __FILE__, __LINE__, uarg);
1522 return -EFAULT;
1523 }
1524
1525 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
1526 (ioc == NULL)) {
1527 dctlprintk((KERN_ERR "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
1528 __FILE__, __LINE__, iocnum));
1529 return -ENODEV;
1530 }
1531
1532 karg.eventEntries = ioc->eventLogSize;
1533 karg.eventTypes = ioc->eventTypes;
1534
1535 /* Copy the data from kernel memory to user memory
1536 */
1537 if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) {
1538 printk(KERN_ERR "%s@%d::mptctl_eventquery - "
1539 "Unable to write out mpt_ioctl_eventquery struct @ %p\n",
1540 __FILE__, __LINE__, uarg);
1541 return -EFAULT;
1542 }
1543 return 0;
1544}
1545
1546/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1547static int
1548mptctl_eventenable (unsigned long arg)
1549{
1550 struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
1551 struct mpt_ioctl_eventenable karg;
1552 MPT_ADAPTER *ioc;
1553 int iocnum;
1554
1555 dctlprintk(("mptctl_eventenable called.\n"));
1556 if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
1557 printk(KERN_ERR "%s@%d::mptctl_eventenable - "
1558 "Unable to read in mpt_ioctl_eventenable struct @ %p\n",
1559 __FILE__, __LINE__, uarg);
1560 return -EFAULT;
1561 }
1562
1563 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
1564 (ioc == NULL)) {
1565 dctlprintk((KERN_ERR "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
1566 __FILE__, __LINE__, iocnum));
1567 return -ENODEV;
1568 }
1569
1570 if (ioc->events == NULL) {
1571 /* Have not yet allocated memory - do so now.
1572 */
1573 int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
1574 ioc->events = kmalloc(sz, GFP_KERNEL);
1575 if (ioc->events == NULL) {
1576 printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
1577 return -ENOMEM;
1578 }
1579 memset(ioc->events, 0, sz);
1580 ioc->alloc_total += sz;
1581
1582 ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE;
1583 ioc->eventContext = 0;
1584 }
1585
1586 /* Update the IOC event logging flag.
1587 */
1588 ioc->eventTypes = karg.eventTypes;
1589
1590 return 0;
1591}
1592
1593/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1594static int
1595mptctl_eventreport (unsigned long arg)
1596{
1597 struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
1598 struct mpt_ioctl_eventreport karg;
1599 MPT_ADAPTER *ioc;
1600 int iocnum;
1601 int numBytes, maxEvents, max;
1602
1603 dctlprintk(("mptctl_eventreport called.\n"));
1604 if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
1605 printk(KERN_ERR "%s@%d::mptctl_eventreport - "
1606 "Unable to read in mpt_ioctl_eventreport struct @ %p\n",
1607 __FILE__, __LINE__, uarg);
1608 return -EFAULT;
1609 }
1610
1611 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
1612 (ioc == NULL)) {
1613 dctlprintk((KERN_ERR "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
1614 __FILE__, __LINE__, iocnum));
1615 return -ENODEV;
1616 }
1617
1618 numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
1619 maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
1620
1621
1622 max = ioc->eventLogSize < maxEvents ? ioc->eventLogSize : maxEvents;
1623
1624 /* If fewer than 1 event is requested, there must have
1625 * been some type of error.
1626 */
1627 if ((max < 1) || !ioc->events)
1628 return -ENODATA;
1629
1630 /* Copy the data from kernel memory to user memory
1631 */
1632 numBytes = max * sizeof(MPT_IOCTL_EVENTS);
1633 if (copy_to_user(uarg->eventData, ioc->events, numBytes)) {
1634 printk(KERN_ERR "%s@%d::mptctl_eventreport - "
1635 "Unable to write out mpt_ioctl_eventreport struct @ %p\n",
1636 __FILE__, __LINE__, ioc->events);
1637 return -EFAULT;
1638 }
1639
1640 return 0;
1641}
1642
1643/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1644static int
1645mptctl_replace_fw (unsigned long arg)
1646{
1647 struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
1648 struct mpt_ioctl_replace_fw karg;
1649 MPT_ADAPTER *ioc;
1650 int iocnum;
1651 int newFwSize;
1652
1653 dctlprintk(("mptctl_replace_fw called.\n"));
1654 if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
1655 printk(KERN_ERR "%s@%d::mptctl_replace_fw - "
1656 "Unable to read in mpt_ioctl_replace_fw struct @ %p\n",
1657 __FILE__, __LINE__, uarg);
1658 return -EFAULT;
1659 }
1660
1661 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
1662 (ioc == NULL)) {
1663 dctlprintk((KERN_ERR "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
1664 __FILE__, __LINE__, iocnum));
1665 return -ENODEV;
1666 }
1667
1668 /* If caching FW, Free the old FW image
1669 */
1670 if (ioc->cached_fw == NULL)
1671 return 0;
1672
1673 mpt_free_fw_memory(ioc);
1674
1675 /* Allocate memory for the new FW image
1676 */
1677 newFwSize = karg.newImageSize;
1678
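	/* Round the requested image size up to the next multiple of four
	 * bytes (adding 1 clears bit 0, then adding 2 clears bit 1).
	 */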
1679 if (newFwSize & 0x01)
1680 newFwSize += 1;
1681 if (newFwSize & 0x02)
1682 newFwSize += 2;
1683
1684 mpt_alloc_fw_memory(ioc, newFwSize);
1685 if (ioc->cached_fw == NULL)
1686 return -ENOMEM;
1687
1688 /* Copy the data from user memory to kernel space
1689 */
1690 if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) {
1691 printk(KERN_ERR "%s@%d::mptctl_replace_fw - "
1692 "Unable to read in mpt_ioctl_replace_fw image "
1693 "@ %p\n", __FILE__, __LINE__, uarg);
1694 mpt_free_fw_memory(ioc);
1695 return -EFAULT;
1696 }
1697
1698 /* Update IOCFactsReply
1699 */
1700 ioc->facts.FWImageSize = newFwSize;
1701 return 0;
1702}
1703
1704/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1705/* MPT IOCTL MPTCOMMAND function.
1706 * Cast the arg into the mpt_ioctl_mpt_command structure.
1707 *
1708 * Outputs: None.
1709 * Return: 0 if successful
1710 *		-EBUSY  if previous command timed out and IOC reset is not complete.
1711 * -EFAULT if data unavailable
1712 * -ENODEV if no such device/adapter
1713 * -ETIME if timer expires
1714 * -ENOMEM if memory allocation error
1715 */
1716static int
1717mptctl_mpt_command (unsigned long arg)
1718{
1719 struct mpt_ioctl_command __user *uarg = (void __user *) arg;
1720 struct mpt_ioctl_command karg;
1721 MPT_ADAPTER *ioc;
1722 int iocnum;
1723 int rc;
1724
1725 dctlprintk(("mptctl_command called.\n"));
1726
1727 if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) {
1728 printk(KERN_ERR "%s@%d::mptctl_mpt_command - "
1729 "Unable to read in mpt_ioctl_command struct @ %p\n",
1730 __FILE__, __LINE__, uarg);
1731 return -EFAULT;
1732 }
1733
1734 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
1735 (ioc == NULL)) {
1736 dctlprintk((KERN_ERR "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
1737 __FILE__, __LINE__, iocnum));
1738 return -ENODEV;
1739 }
1740
1741 rc = mptctl_do_mpt_command (karg, &uarg->MF);
1742
1743 return rc;
1744}
1745
1746/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1747/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (sparc) commands.
1748 *
1749 * Outputs: None.
1750 * Return: 0 if successful
1751 *		-EBUSY  if previous command timed out and IOC reset is not complete.
1752 * -EFAULT if data unavailable
1753 * -ENODEV if no such device/adapter
1754 * -ETIME if timer expires
1755 * -ENOMEM if memory allocation error
1756 * -EPERM if SCSI I/O and target is untagged
1757 */
1758static int
1759mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1760{
1761 MPT_ADAPTER *ioc;
1762 MPT_FRAME_HDR *mf = NULL;
1763 MPIHeader_t *hdr;
1764 char *psge;
1765 struct buflist bufIn; /* data In buffer */
1766 struct buflist bufOut; /* data Out buffer */
1767 dma_addr_t dma_addr_in;
1768 dma_addr_t dma_addr_out;
1769 int sgSize = 0; /* Num SG elements */
1770 int iocnum, flagsLength;
1771 int sz, rc = 0;
1772 int msgContext;
1773 u16 req_idx;
1774 ulong timeout;
1775
1776 dctlprintk(("mptctl_do_mpt_command called.\n"));
1777 bufIn.kptr = bufOut.kptr = NULL;
1778
1779 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
1780 (ioc == NULL)) {
1781 dctlprintk((KERN_ERR "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
1782 __FILE__, __LINE__, iocnum));
1783 return -ENODEV;
1784 }
1785 if (!ioc->ioctl) {
1786 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1787 "No memory available during driver init.\n",
1788 __FILE__, __LINE__);
1789 return -ENOMEM;
1790 } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) {
1791 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1792 "Busy with IOC Reset \n", __FILE__, __LINE__);
1793 return -EBUSY;
1794 }
1795
1796 /* Verify that the final request frame will not be too large.
1797 */
1798 sz = karg.dataSgeOffset * 4;
1799 if (karg.dataInSize > 0)
1800 sz += sizeof(dma_addr_t) + sizeof(u32);
1801 if (karg.dataOutSize > 0)
1802 sz += sizeof(dma_addr_t) + sizeof(u32);
1803
1804 if (sz > ioc->req_sz) {
1805 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1806 "Request frame too large (%d) maximum (%d)\n",
1807 __FILE__, __LINE__, sz, ioc->req_sz);
1808 return -EFAULT;
1809 }
1810
1811 /* Get a free request frame and save the message context.
1812 */
1813 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL)
1814 return -EAGAIN;
1815
1816 hdr = (MPIHeader_t *) mf;
1817 msgContext = le32_to_cpu(hdr->MsgContext);
1818 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
1819
1820 /* Copy the request frame
1821 * Reset the saved message context.
1822 * Request frame in user space
1823 */
1824 if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) {
1825 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1826 "Unable to read MF from mpt_ioctl_command struct @ %p\n",
1827 __FILE__, __LINE__, mfPtr);
1828 rc = -EFAULT;
1829 goto done_free_mem;
1830 }
1831 hdr->MsgContext = cpu_to_le32(msgContext);
1832
1833
1834 /* Verify that this request is allowed.
1835 */
1836 switch (hdr->Function) {
1837 case MPI_FUNCTION_IOC_FACTS:
1838 case MPI_FUNCTION_PORT_FACTS:
1839 karg.dataOutSize = karg.dataInSize = 0;
1840 break;
1841
1842 case MPI_FUNCTION_CONFIG:
1843 case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND:
1844 case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND:
1845 case MPI_FUNCTION_FW_UPLOAD:
1846 case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1847 case MPI_FUNCTION_FW_DOWNLOAD:
1848 case MPI_FUNCTION_FC_PRIMITIVE_SEND:
1849 break;
1850
1851 case MPI_FUNCTION_SCSI_IO_REQUEST:
1852 if (ioc->sh) {
1853 SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
1854 VirtDevice *pTarget = NULL;
1855 MPT_SCSI_HOST *hd = NULL;
1856 int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
1857 int scsidir = 0;
1858 int target = (int) pScsiReq->TargetID;
1859 int dataSize;
1860
1861 if ((target < 0) || (target >= ioc->sh->max_id)) {
1862 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1863 "Target ID out of bounds. \n",
1864 __FILE__, __LINE__);
1865 rc = -ENODEV;
1866 goto done_free_mem;
1867 }
1868
1869 pScsiReq->MsgFlags = mpt_msg_flags();
1870
1871			/* Verify that the app has not requested more
1872			 * sense data than the driver can provide;
1873			 * if so, clamp the value. Then set the sense
1874			 * buffer low address and update the control
1875			 * field to specify the queue type.
1876			 */
1877 if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
1878 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
1879 else
1880 pScsiReq->SenseBufferLength = karg.maxSenseBytes;
1881
1882 pScsiReq->SenseBufferLowAddr =
1883 cpu_to_le32(ioc->sense_buf_low_dma
1884 + (req_idx * MPT_SENSE_BUFFER_ALLOC));
1885
1886 if ((hd = (MPT_SCSI_HOST *) ioc->sh->hostdata)) {
1887 if (hd->Targets)
1888 pTarget = hd->Targets[target];
1889 }
1890
1891 if (pTarget &&(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES))
1892 qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
1893
1894 /* Have the IOCTL driver set the direction based
1895 * on the dataOutSize (ordering issue with Sparc).
1896 */
1897 if (karg.dataOutSize > 0) {
1898 scsidir = MPI_SCSIIO_CONTROL_WRITE;
1899 dataSize = karg.dataOutSize;
1900 } else {
1901 scsidir = MPI_SCSIIO_CONTROL_READ;
1902 dataSize = karg.dataInSize;
1903 }
1904
1905 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
1906 pScsiReq->DataLength = cpu_to_le32(dataSize);
1907
1908 ioc->ioctl->reset = MPTCTL_RESET_OK;
1909 ioc->ioctl->target = target;
1910
1911 } else {
1912 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1913 "SCSI driver is not loaded. \n",
1914 __FILE__, __LINE__);
1915 rc = -EFAULT;
1916 goto done_free_mem;
1917 }
1918 break;
1919
1920 case MPI_FUNCTION_RAID_ACTION:
1921 /* Just add a SGE
1922 */
1923 break;
1924
1925 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1926 if (ioc->sh) {
1927 SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
1928 int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
1929 int scsidir = MPI_SCSIIO_CONTROL_READ;
1930 int dataSize;
1931
1932 pScsiReq->MsgFlags = mpt_msg_flags();
1933
1934			/* Verify that the app has not requested more
1935			 * sense data than the driver can provide;
1936			 * if so, clamp the value. Then set the sense
1937			 * buffer low address and update the control
1938			 * field to specify the queue type.
1939			 */
1940 if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
1941 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
1942 else
1943 pScsiReq->SenseBufferLength = karg.maxSenseBytes;
1944
1945 pScsiReq->SenseBufferLowAddr =
1946 cpu_to_le32(ioc->sense_buf_low_dma
1947 + (req_idx * MPT_SENSE_BUFFER_ALLOC));
1948
1949 /* All commands to physical devices are tagged
1950 */
1951
1952 /* Have the IOCTL driver set the direction based
1953 * on the dataOutSize (ordering issue with Sparc).
1954 */
1955 if (karg.dataOutSize > 0) {
1956 scsidir = MPI_SCSIIO_CONTROL_WRITE;
1957 dataSize = karg.dataOutSize;
1958 } else {
1959 scsidir = MPI_SCSIIO_CONTROL_READ;
1960 dataSize = karg.dataInSize;
1961 }
1962
1963 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
1964 pScsiReq->DataLength = cpu_to_le32(dataSize);
1965
1966 ioc->ioctl->reset = MPTCTL_RESET_OK;
1967 ioc->ioctl->target = pScsiReq->TargetID;
1968 } else {
1969 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1970 "SCSI driver is not loaded. \n",
1971 __FILE__, __LINE__);
1972 rc = -EFAULT;
1973 goto done_free_mem;
1974 }
1975 break;
1976
1977 case MPI_FUNCTION_SCSI_TASK_MGMT:
1978 {
1979 MPT_SCSI_HOST *hd = NULL;
1980 if ((ioc->sh == NULL) || ((hd = (MPT_SCSI_HOST *)ioc->sh->hostdata) == NULL)) {
1981 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
1982 "SCSI driver not loaded or SCSI host not found. \n",
1983 __FILE__, __LINE__);
1984 rc = -EFAULT;
1985 goto done_free_mem;
1986 } else if (mptctl_set_tm_flags(hd) != 0) {
1987 rc = -EPERM;
1988 goto done_free_mem;
1989 }
1990 }
1991 break;
1992
1993 case MPI_FUNCTION_IOC_INIT:
1994 {
1995 IOCInit_t *pInit = (IOCInit_t *) mf;
1996 u32 high_addr, sense_high;
1997
1998 /* Verify that all entries in the IOC INIT match
1999 * existing setup (and in LE format).
2000 */
2001 if (sizeof(dma_addr_t) == sizeof(u64)) {
2002 high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32));
2003 sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
2004 } else {
2005 high_addr = 0;
2006 sense_high= 0;
2007 }
2008
2009 if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) ||
2010 (pInit->MaxBuses != ioc->facts.MaxBuses) ||
2011 (pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) ||
2012 (pInit->HostMfaHighAddr != high_addr) ||
2013 (pInit->SenseBufferHighAddr != sense_high)) {
2014 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
2015 "IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n",
2016 __FILE__, __LINE__);
2017 rc = -EFAULT;
2018 goto done_free_mem;
2019 }
2020 }
2021 break;
2022 default:
2023 /*
2024 * MPI_FUNCTION_PORT_ENABLE
2025 * MPI_FUNCTION_TARGET_CMD_BUFFER_POST
2026 * MPI_FUNCTION_TARGET_ASSIST
2027 * MPI_FUNCTION_TARGET_STATUS_SEND
2028 * MPI_FUNCTION_TARGET_MODE_ABORT
2029 * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET
2030 * MPI_FUNCTION_IO_UNIT_RESET
2031 * MPI_FUNCTION_HANDSHAKE
2032 * MPI_FUNCTION_REPLY_FRAME_REMOVAL
2033 * MPI_FUNCTION_EVENT_NOTIFICATION
2034 * (driver handles event notification)
2035 * MPI_FUNCTION_EVENT_ACK
2036 */
2037
2038 /* What to do with these??? CHECK ME!!!
2039 MPI_FUNCTION_FC_LINK_SRVC_BUF_POST
2040 MPI_FUNCTION_FC_LINK_SRVC_RSP
2041 MPI_FUNCTION_FC_ABORT
2042 MPI_FUNCTION_LAN_SEND
2043 MPI_FUNCTION_LAN_RECEIVE
2044 MPI_FUNCTION_LAN_RESET
2045 */
2046
2047 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
2048 "Illegal request (function 0x%x) \n",
2049 __FILE__, __LINE__, hdr->Function);
2050 rc = -EFAULT;
2051 goto done_free_mem;
2052 }
2053
2054 /* Add the SGL ( at most one data in SGE and one data out SGE )
2055 * In the case of two SGE's - the data out (write) will always
2056	 * precede the data in (read) SGE. bufOut and bufIn are used to free the
2057 * allocated memory.
2058 */
2059 psge = (char *) (((int *) mf) + karg.dataSgeOffset);
2060 flagsLength = 0;
2061
2062 /* bufIn and bufOut are used for user to kernel space transfers
2063 */
2064 bufIn.kptr = bufOut.kptr = NULL;
2065 bufIn.len = bufOut.len = 0;
2066
2067 if (karg.dataOutSize > 0)
2068 sgSize ++;
2069
2070 if (karg.dataInSize > 0)
2071 sgSize ++;
2072
2073 if (sgSize > 0) {
2074
2075 /* Set up the dataOut memory allocation */
2076 if (karg.dataOutSize > 0) {
2077 if (karg.dataInSize > 0) {
2078 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2079 MPI_SGE_FLAGS_END_OF_BUFFER |
2080 MPI_SGE_FLAGS_DIRECTION |
2081 mpt_addr_size() )
2082 << MPI_SGE_FLAGS_SHIFT;
2083 } else {
2084 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
2085 }
2086 flagsLength |= karg.dataOutSize;
2087 bufOut.len = karg.dataOutSize;
2088 bufOut.kptr = pci_alloc_consistent(
2089 ioc->pcidev, bufOut.len, &dma_addr_out);
2090
2091 if (bufOut.kptr == NULL) {
2092 rc = -ENOMEM;
2093 goto done_free_mem;
2094 } else {
2095 /* Set up this SGE.
2096 * Copy to MF and to sglbuf
2097 */
2098 mpt_add_sge(psge, flagsLength, dma_addr_out);
2099 psge += (sizeof(u32) + sizeof(dma_addr_t));
2100
2101 /* Copy user data to kernel space.
2102 */
2103 if (copy_from_user(bufOut.kptr,
2104 karg.dataOutBufPtr,
2105 bufOut.len)) {
2106 printk(KERN_ERR
2107 "%s@%d::mptctl_do_mpt_command - Unable "
2108 "to read user data "
2109 "struct @ %p\n",
2110 __FILE__, __LINE__,karg.dataOutBufPtr);
2111 rc = -EFAULT;
2112 goto done_free_mem;
2113 }
2114 }
2115 }
2116
2117 if (karg.dataInSize > 0) {
2118 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
2119 flagsLength |= karg.dataInSize;
2120
2121 bufIn.len = karg.dataInSize;
2122 bufIn.kptr = pci_alloc_consistent(ioc->pcidev,
2123 bufIn.len, &dma_addr_in);
2124
2125 if (bufIn.kptr == NULL) {
2126 rc = -ENOMEM;
2127 goto done_free_mem;
2128 } else {
2129 /* Set up this SGE
2130 * Copy to MF and to sglbuf
2131 */
2132 mpt_add_sge(psge, flagsLength, dma_addr_in);
2133 }
2134 }
2135 } else {
2136 /* Add a NULL SGE
2137 */
2138 mpt_add_sge(psge, flagsLength, (dma_addr_t) -1);
2139 }
2140
2141 ioc->ioctl->wait_done = 0;
2142 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
2143
2144 DBG_DUMP_TM_REQUEST_FRAME((u32 *)mf);
2145
2146 if (mpt_send_handshake_request(mptctl_id, ioc,
2147 sizeof(SCSITaskMgmt_t), (u32*)mf,
2148 CAN_SLEEP) != 0) {
2149 dfailprintk((MYIOC_s_ERR_FMT "_send_handshake FAILED!"
2150 " (ioc %p, mf %p) \n", ioc->name,
2151 ioc, mf));
2152 mptctl_free_tm_flags(ioc);
2153 rc = -ENODATA;
2154 goto done_free_mem;
2155 }
2156
2157 } else
2158 mpt_put_msg_frame(mptctl_id, ioc, mf);
2159
2160 /* Now wait for the command to complete */
2161 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
2162 timeout = wait_event_interruptible_timeout(mptctl_wait,
2163 ioc->ioctl->wait_done == 1,
2164 HZ*timeout);
2165
2166 if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) {
2167 /* Now we need to reset the board */
2168
2169 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT)
2170 mptctl_free_tm_flags(ioc);
2171
2172 mptctl_timeout_expired(ioc->ioctl);
2173 rc = -ENODATA;
2174 goto done_free_mem;
2175 }
2176
2177 mf = NULL;
2178
2179 /* If a valid reply frame, copy to the user.
2180 * Offset 2: reply length in U32's
2181 */
2182 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) {
2183 if (karg.maxReplyBytes < ioc->reply_sz) {
2184 sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]);
2185 } else {
2186 sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]);
2187 }
2188
2189 if (sz > 0) {
2190 if (copy_to_user(karg.replyFrameBufPtr,
2191 &ioc->ioctl->ReplyFrame, sz)){
2192 printk(KERN_ERR
2193 "%s@%d::mptctl_do_mpt_command - "
2194 "Unable to write out reply frame %p\n",
2195 __FILE__, __LINE__, karg.replyFrameBufPtr);
2196 rc = -ENODATA;
2197 goto done_free_mem;
2198 }
2199 }
2200 }
2201
2202 /* If valid sense data, copy to user.
2203 */
2204 if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) {
2205 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
2206 if (sz > 0) {
2207 if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) {
2208 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
2209 "Unable to write sense data to user %p\n",
2210 __FILE__, __LINE__,
2211 karg.senseDataPtr);
2212 rc = -ENODATA;
2213 goto done_free_mem;
2214 }
2215 }
2216 }
2217
2218 /* If the overall status is _GOOD and data in, copy data
2219 * to user.
2220 */
2221 if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) &&
2222 (karg.dataInSize > 0) && (bufIn.kptr)) {
2223
2224 if (copy_to_user(karg.dataInBufPtr,
2225 bufIn.kptr, karg.dataInSize)) {
2226 printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
2227 "Unable to write data to user %p\n",
2228 __FILE__, __LINE__,
2229 karg.dataInBufPtr);
2230 rc = -ENODATA;
2231 }
2232 }
2233
2234done_free_mem:
2235
2236 ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD |
2237 MPT_IOCTL_STATUS_SENSE_VALID |
2238 MPT_IOCTL_STATUS_RF_VALID );
2239
2240 /* Free the allocated memory.
2241 */
2242 if (bufOut.kptr != NULL) {
2243 pci_free_consistent(ioc->pcidev,
2244 bufOut.len, (void *) bufOut.kptr, dma_addr_out);
2245 }
2246
2247 if (bufIn.kptr != NULL) {
2248 pci_free_consistent(ioc->pcidev,
2249 bufIn.len, (void *) bufIn.kptr, dma_addr_in);
2250 }
2251
2252	/* mf is NULL if the command was issued successfully;
2253	 * otherwise, a failure occurred after mf was acquired.
2254 */
2255 if (mf)
2256 mpt_free_msg_frame(ioc, mf);
2257
2258 return rc;
2259}
2260
2261/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2262/* Prototype Routine for the HP HOST INFO command.
2263 *
2264 * Outputs: None.
2265 * Return: 0 if successful
2266 * -EFAULT if data unavailable
2267 *		-EBUSY  if previous command timed out and IOC reset is not complete.
2268 * -ENODEV if no such device/adapter
2269 * -ETIME if timer expires
2270 * -ENOMEM if memory allocation error
2271 */
2272static int
2273mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2274{
2275 hp_host_info_t __user *uarg = (void __user *) arg;
2276 MPT_ADAPTER *ioc;
2277 struct pci_dev *pdev;
2278 char *pbuf;
2279 dma_addr_t buf_dma;
2280 hp_host_info_t karg;
2281 CONFIGPARMS cfg;
2282 ConfigPageHeader_t hdr;
2283 int iocnum;
2284 int rc, cim_rev;
2285
2286 dctlprintk((": mptctl_hp_hostinfo called.\n"));
2287	/* Pick the structure revision from the passed-in size (the sizes differ on 64-bit platforms such as IA64 and SPARC).
2288	 */
2289 if (data_size == sizeof(hp_host_info_t))
2290 cim_rev = 1;
2291 else if (data_size == sizeof(hp_host_info_rev0_t))
2292 cim_rev = 0; /* obsolete */
2293 else
2294 return -EFAULT;
2295
2296 if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) {
2297 printk(KERN_ERR "%s@%d::mptctl_hp_host_info - "
2298 "Unable to read in hp_host_info struct @ %p\n",
2299 __FILE__, __LINE__, uarg);
2300 return -EFAULT;
2301 }
2302
2303 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
2304 (ioc == NULL)) {
2305 dctlprintk((KERN_ERR "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
2306 __FILE__, __LINE__, iocnum));
2307 return -ENODEV;
2308 }
2309
2310 /* Fill in the data and return the structure to the calling
2311 * program
2312 */
2313 pdev = (struct pci_dev *) ioc->pcidev;
2314
2315 karg.vendor = pdev->vendor;
2316 karg.device = pdev->device;
2317 karg.subsystem_id = pdev->subsystem_device;
2318 karg.subsystem_vendor = pdev->subsystem_vendor;
2319 karg.devfn = pdev->devfn;
2320 karg.bus = pdev->bus->number;
2321
2322 /* Save the SCSI host no. if
2323 * SCSI driver loaded
2324 */
2325 if (ioc->sh != NULL)
2326 karg.host_no = ioc->sh->host_no;
2327 else
2328 karg.host_no = -1;
2329
2330 /* Reformat the fw_version into a string
2331 */
2332 karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
2333 ((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
2334 karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
2335 karg.fw_version[2] = '.';
2336 karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
2337 ((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
2338 karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
2339 karg.fw_version[5] = '.';
2340 karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
2341 ((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
2342 karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
2343 karg.fw_version[8] = '.';
2344 karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
2345 ((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
2346 karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
2347 karg.fw_version[11] = '\0';
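	/* (Illustrative example, not in the original source: a firmware
	 *  version of Major=1, Minor=3, Unit=27, Dev=0 formats as
	 *  "01.03.27.00".)
	 */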
2348
2349 /* Issue a config request to get the device serial number
2350 */
2351 hdr.PageVersion = 0;
2352 hdr.PageLength = 0;
2353 hdr.PageNumber = 0;
2354 hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
2355 cfg.hdr = &hdr;
2356 cfg.physAddr = -1;
2357 cfg.pageAddr = 0;
2358 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
2359 cfg.dir = 0; /* read */
2360 cfg.timeout = 10;
2361
2362 strncpy(karg.serial_number, " ", 24);
2363 if (mpt_config(ioc, &cfg) == 0) {
2364 if (cfg.hdr->PageLength > 0) {
2365 /* Issue the second config page request */
2366 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
2367
2368 pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
2369 if (pbuf) {
2370 cfg.physAddr = buf_dma;
2371 if (mpt_config(ioc, &cfg) == 0) {
2372 ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
2373 if (strlen(pdata->BoardTracerNumber) > 1) {
2374 strncpy(karg.serial_number, pdata->BoardTracerNumber, 24);
2375 karg.serial_number[24-1]='\0';
2376 }
2377 }
2378 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
2379 pbuf = NULL;
2380 }
2381 }
2382 }
2383 rc = mpt_GetIocState(ioc, 1);
2384 switch (rc) {
2385 case MPI_IOC_STATE_OPERATIONAL:
2386 karg.ioc_status = HP_STATUS_OK;
2387 break;
2388
2389 case MPI_IOC_STATE_FAULT:
2390 karg.ioc_status = HP_STATUS_FAILED;
2391 break;
2392
2393 case MPI_IOC_STATE_RESET:
2394 case MPI_IOC_STATE_READY:
2395 default:
2396 karg.ioc_status = HP_STATUS_OTHER;
2397 break;
2398 }
2399
2400 karg.base_io_addr = pci_resource_start(pdev, 0);
2401
2402 if (ioc->bus_type == FC)
2403 karg.bus_phys_width = HP_BUS_WIDTH_UNK;
2404 else
2405 karg.bus_phys_width = HP_BUS_WIDTH_16;
2406
2407 karg.hard_resets = 0;
2408 karg.soft_resets = 0;
2409 karg.timeouts = 0;
2410 if (ioc->sh != NULL) {
2411 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)ioc->sh->hostdata;
2412
2413 if (hd && (cim_rev == 1)) {
2414 karg.hard_resets = hd->hard_resets;
2415 karg.soft_resets = hd->soft_resets;
2416 karg.timeouts = hd->timeouts;
2417 }
2418 }
2419
2420 cfg.pageAddr = 0;
2421 cfg.action = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
2422 cfg.dir = MPI_TB_ISTWI_FLAGS_READ;
2423 cfg.timeout = 10;
2424 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
2425 if (pbuf) {
2426 cfg.physAddr = buf_dma;
2427 if ((mpt_toolbox(ioc, &cfg)) == 0) {
2428 karg.rsvd = *(u32 *)pbuf;
2429 }
2430 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
2431 pbuf = NULL;
2432 }
2433
2434 /* Copy the data from kernel memory to user memory
2435 */
2436 if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
2437		printk(KERN_ERR "%s@%d::mptctl_hp_hostinfo - "
2438 "Unable to write out hp_host_info @ %p\n",
2439 __FILE__, __LINE__, uarg);
2440 return -EFAULT;
2441 }
2442
2443 return 0;
2444
2445}
2446
2447/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2448/* Prototype Routine for the HP TARGET INFO command.
2449 *
2450 * Outputs: None.
2451 * Return: 0 if successful
2452 * -EFAULT if data unavailable
2453 *		-EBUSY  if previous command timed out and IOC reset is not complete.
2454 * -ENODEV if no such device/adapter
2455 * -ETIME if timer expires
2456 * -ENOMEM if memory allocation error
2457 */
2458static int
2459mptctl_hp_targetinfo(unsigned long arg)
2460{
2461 hp_target_info_t __user *uarg = (void __user *) arg;
2462 SCSIDevicePage0_t *pg0_alloc;
2463 SCSIDevicePage3_t *pg3_alloc;
2464 MPT_ADAPTER *ioc;
2465 MPT_SCSI_HOST *hd = NULL;
2466 hp_target_info_t karg;
2467 int iocnum;
2468 int data_sz;
2469 dma_addr_t page_dma;
2470 CONFIGPARMS cfg;
2471 ConfigPageHeader_t hdr;
2472 int tmp, np, rc = 0;
2473
2474 dctlprintk((": mptctl_hp_targetinfo called.\n"));
2475 if (copy_from_user(&karg, uarg, sizeof(hp_target_info_t))) {
2476 printk(KERN_ERR "%s@%d::mptctl_hp_targetinfo - "
2477			"Unable to read in hp_target_info struct @ %p\n",
2478 __FILE__, __LINE__, uarg);
2479 return -EFAULT;
2480 }
2481
2482 if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
2483 (ioc == NULL)) {
2484 dctlprintk((KERN_ERR "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
2485 __FILE__, __LINE__, iocnum));
2486 return -ENODEV;
2487 }
2488
2489 /* There is nothing to do for FCP parts.
2490 */
2491 if (ioc->bus_type == FC)
2492 return 0;
2493
2494 if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
2495 return 0;
2496
2497 if (ioc->sh->host_no != karg.hdr.host)
2498 return -ENODEV;
2499
2500 /* Get the data transfer speeds
2501 */
2502 data_sz = ioc->spi_data.sdp0length * 4;
2503 pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
2504 if (pg0_alloc) {
2505 hdr.PageVersion = ioc->spi_data.sdp0version;
2506 hdr.PageLength = data_sz;
2507 hdr.PageNumber = 0;
2508 hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
2509
2510 cfg.hdr = &hdr;
2511 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
2512 cfg.dir = 0;
2513 cfg.timeout = 0;
2514 cfg.physAddr = page_dma;
2515
2516 cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
2517
2518 if ((rc = mpt_config(ioc, &cfg)) == 0) {
2519 np = le32_to_cpu(pg0_alloc->NegotiatedParameters);
2520 karg.negotiated_width = np & MPI_SCSIDEVPAGE0_NP_WIDE ?
2521 HP_BUS_WIDTH_16 : HP_BUS_WIDTH_8;
2522
2523 if (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) {
2524 tmp = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
2525 if (tmp < 0x09)
2526 karg.negotiated_speed = HP_DEV_SPEED_ULTRA320;
2527 else if (tmp <= 0x09)
2528 karg.negotiated_speed = HP_DEV_SPEED_ULTRA160;
2529 else if (tmp <= 0x0A)
2530 karg.negotiated_speed = HP_DEV_SPEED_ULTRA2;
2531 else if (tmp <= 0x0C)
2532 karg.negotiated_speed = HP_DEV_SPEED_ULTRA;
2533 else if (tmp <= 0x25)
2534 karg.negotiated_speed = HP_DEV_SPEED_FAST;
2535 else
2536 karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
2537 } else
2538 karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
2539 }
2540
2541 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma);
2542 }
2543
2544 /* Set defaults
2545 */
2546 karg.message_rejects = -1;
2547 karg.phase_errors = -1;
2548 karg.parity_errors = -1;
2549 karg.select_timeouts = -1;
2550
2551 /* Get the target error parameters
2552 */
2553 hdr.PageVersion = 0;
2554 hdr.PageLength = 0;
2555 hdr.PageNumber = 3;
2556 hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
2557
2558 cfg.hdr = &hdr;
2559 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
2560 cfg.dir = 0;
2561 cfg.timeout = 0;
2562 cfg.physAddr = -1;
2563 if ((mpt_config(ioc, &cfg) == 0) && (cfg.hdr->PageLength > 0)) {
2564 /* Issue the second config page request */
2565 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
2566 data_sz = (int) cfg.hdr->PageLength * 4;
2567 pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
2568 ioc->pcidev, data_sz, &page_dma);
2569 if (pg3_alloc) {
2570 cfg.physAddr = page_dma;
2571 cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
2572 if ((rc = mpt_config(ioc, &cfg)) == 0) {
2573 karg.message_rejects = (u32) le16_to_cpu(pg3_alloc->MsgRejectCount);
2574 karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
2575 karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
2576 }
2577 pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma);
2578 }
2579 }
2580 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2581 if (hd != NULL)
2582 karg.select_timeouts = hd->sel_timeout[karg.hdr.id];
2583
2584 /* Copy the data from kernel memory to user memory
2585 */
2586 if (copy_to_user((char __user *)arg, &karg, sizeof(hp_target_info_t))) {
2587		printk(KERN_ERR "%s@%d::mptctl_hp_targetinfo - "
2588			"Unable to write out hp_target_info struct @ %p\n",
2589 __FILE__, __LINE__, uarg);
2590 return -EFAULT;
2591 }
2592
2593 return 0;
2594}
2595
2596/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2597
2598static struct file_operations mptctl_fops = {
2599 .owner = THIS_MODULE,
2600 .llseek = no_llseek,
2601 .unlocked_ioctl = mptctl_ioctl,
2602#ifdef CONFIG_COMPAT
2603 .compat_ioctl = compat_mpctl_ioctl,
2604#endif
2605};
2606
2607static struct miscdevice mptctl_miscdev = {
2608 MPT_MINOR,
2609 MYNAM,
2610 &mptctl_fops
2611};
2612
2613/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2614
2615#ifdef CONFIG_COMPAT
2616
2617#include <linux/ioctl32.h>
2618
2619static int
2620compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
2621 unsigned long arg)
2622{
2623 struct mpt_fw_xfer32 kfw32;
2624 struct mpt_fw_xfer kfw;
2625 MPT_ADAPTER *iocp = NULL;
2626 int iocnum, iocnumX;
2627 int nonblock = (filp->f_flags & O_NONBLOCK);
2628 int ret;
2629
2630 dctlprintk((KERN_INFO MYNAM "::compat_mptfwxfer_ioctl() called\n"));
2631
2632 if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32)))
2633 return -EFAULT;
2634
2635 /* Verify intended MPT adapter */
2636 iocnumX = kfw32.iocnum & 0xFF;
2637 if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
2638 (iocp == NULL)) {
2639 dctlprintk((KERN_ERR MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n",
2640 __LINE__, iocnumX));
2641 return -ENODEV;
2642 }
2643
2644 if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
2645 return ret;
2646
2647 kfw.iocnum = iocnum;
2648 kfw.fwlen = kfw32.fwlen;
2649 kfw.bufp = compat_ptr(kfw32.bufp);
2650
2651 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
2652
2653 up(&iocp->ioctl->sem_ioc);
2654
2655 return ret;
2656}
2657
2658static int
2659compat_mpt_command(struct file *filp, unsigned int cmd,
2660 unsigned long arg)
2661{
2662 struct mpt_ioctl_command32 karg32;
2663 struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg;
2664 struct mpt_ioctl_command karg;
2665 MPT_ADAPTER *iocp = NULL;
2666 int iocnum, iocnumX;
2667 int nonblock = (filp->f_flags & O_NONBLOCK);
2668 int ret;
2669
2670 dctlprintk((KERN_INFO MYNAM "::compat_mpt_command() called\n"));
2671
2672 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32)))
2673 return -EFAULT;
2674
2675 /* Verify intended MPT adapter */
2676 iocnumX = karg32.hdr.iocnum & 0xFF;
2677 if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
2678 (iocp == NULL)) {
2679 dctlprintk((KERN_ERR MYNAM "::compat_mpt_command @%d - ioc%d not found!\n",
2680 __LINE__, iocnumX));
2681 return -ENODEV;
2682 }
2683
2684 if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
2685 return ret;
2686
2687 /* Copy data to karg */
2688 karg.hdr.iocnum = karg32.hdr.iocnum;
2689 karg.hdr.port = karg32.hdr.port;
2690 karg.timeout = karg32.timeout;
2691 karg.maxReplyBytes = karg32.maxReplyBytes;
2692
2693 karg.dataInSize = karg32.dataInSize;
2694 karg.dataOutSize = karg32.dataOutSize;
2695 karg.maxSenseBytes = karg32.maxSenseBytes;
2696 karg.dataSgeOffset = karg32.dataSgeOffset;
2697
2698 karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr;
2699 karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr;
2700 karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr;
2701 karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr;
2702
2703 /* Pass new structure to do_mpt_command
2704 */
2705 ret = mptctl_do_mpt_command (karg, &uarg->MF);
2706
2707 up(&iocp->ioctl->sem_ioc);
2708
2709 return ret;
2710}
2711
2712static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
2713{
2714 long ret;
2715 lock_kernel();
2716 switch (cmd) {
2717 case MPTIOCINFO:
2718 case MPTIOCINFO1:
2719 case MPTIOCINFO2:
2720 case MPTTARGETINFO:
2721 case MPTEVENTQUERY:
2722 case MPTEVENTENABLE:
2723 case MPTEVENTREPORT:
2724 case MPTHARDRESET:
2725 case HP_GETHOSTINFO:
2726 case HP_GETTARGETINFO:
2727 case MPTTEST:
2728 ret = __mptctl_ioctl(f, cmd, arg);
2729 break;
2730 case MPTCOMMAND32:
2731 ret = compat_mpt_command(f, cmd, arg);
2732 break;
2733 case MPTFWDOWNLOAD32:
2734 ret = compat_mptfwxfer_ioctl(f, cmd, arg);
2735 break;
2736 default:
2737 ret = -ENOIOCTLCMD;
2738 break;
2739 }
2740 unlock_kernel();
2741 return ret;
2742}
2743
2744#endif
2745
2746
2747/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2748/*
2749 * mptctl_probe - Installs ioctl devices per bus.
2750 * @pdev: Pointer to pci_dev structure
2751 *
2752 * Returns 0 for success, non-zero for failure.
2753 *
2754 */
2755
2756static int
2757mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2758{
2759 int err;
2760 int sz;
2761 u8 *mem;
2762 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2763
2764 /*
2765	 * Allocate and initialize an MPT_IOCTL structure
2766 */
2767 sz = sizeof (MPT_IOCTL);
2768 mem = kmalloc(sz, GFP_KERNEL);
2769 if (mem == NULL) {
2770 err = -ENOMEM;
2771 goto out_fail;
2772 }
2773
2774 memset(mem, 0, sz);
2775 ioc->ioctl = (MPT_IOCTL *) mem;
2776 ioc->ioctl->ioc = ioc;
2777 sema_init(&ioc->ioctl->sem_ioc, 1);
2778 return 0;
2779
2780out_fail:
2781
2782 mptctl_remove(pdev);
2783 return err;
2784}
2785
2786/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2787/*
2788 *	mptctl_remove - Remove ioctl devices
2789 * @pdev: Pointer to pci_dev structure
2790 *
2791 *
2792 */
2793static void
2794mptctl_remove(struct pci_dev *pdev)
2795{
2796 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2797
2798 kfree ( ioc->ioctl );
2799}
2800
2801static struct mpt_pci_driver mptctl_driver = {
2802 .probe = mptctl_probe,
2803 .remove = mptctl_remove,
2804};
2805
2806/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2807static int __init mptctl_init(void)
2808{
2809 int err;
2810 int where = 1;
2811
2812 show_mptmod_ver(my_NAME, my_VERSION);
2813
2814 if(mpt_device_driver_register(&mptctl_driver,
2815 MPTCTL_DRIVER) != 0 ) {
2816 dprintk((KERN_INFO MYNAM
2817 ": failed to register dd callbacks\n"));
2818 }
2819
2820 /* Register this device */
2821 err = misc_register(&mptctl_miscdev);
2822 if (err < 0) {
2823 printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR);
2824 goto out_fail;
2825 }
2826 printk(KERN_INFO MYNAM ": Registered with Fusion MPT base driver\n");
2827 printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n",
2828 mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
2829
2830 /*
2831 * Install our handler
2832 */
2833 ++where;
2834 if ((mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER)) < 0) {
2835 printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
2836 misc_deregister(&mptctl_miscdev);
2837 err = -EBUSY;
2838 goto out_fail;
2839 }
2840
2841 if (mpt_reset_register(mptctl_id, mptctl_ioc_reset) == 0) {
2842 dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
2843 } else {
2844 /* FIXME! */
2845 }
2846
2847 return 0;
2848
2849out_fail:
2850
2851 mpt_device_driver_deregister(MPTCTL_DRIVER);
2852
2853 return err;
2854}
2855
2856/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2857static void mptctl_exit(void)
2858{
2859 misc_deregister(&mptctl_miscdev);
2860 printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
2861 mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
2862
2863 /* De-register reset handler from base module */
2864 mpt_reset_deregister(mptctl_id);
2865 dprintk((KERN_INFO MYNAM ": Deregistered for IOC reset notifications\n"));
2866
2867 /* De-register callback handler from base module */
2868 mpt_deregister(mptctl_id);
2869 printk(KERN_INFO MYNAM ": Deregistered from Fusion MPT base driver\n");
2870
2871 mpt_device_driver_deregister(MPTCTL_DRIVER);
2872
2873}
2874
2875/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2876
2877module_init(mptctl_init);
2878module_exit(mptctl_exit);
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
new file mode 100644
index 000000000000..cc4ecf0382df
--- /dev/null
+++ b/drivers/message/fusion/mptctl.h
@@ -0,0 +1,484 @@
1/*
2 * linux/drivers/message/fusion/mptioctl.h
3 * Fusion MPT misc device (ioctl) driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7 *
8 * Credits:
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
11 *
12 * A huge debt of gratitude is owed to David S. Miller (DaveM)
13 * for fixing much of the stupid and broken stuff in the early
14 * driver while porting to sparc64 platform. THANK YOU!
15 *
16 * (see also mptbase.c)
17 *
18 * Copyright (c) 1999-2004 LSI Logic Corporation
19 * Originally By: Steven J. Ralston
20 * (mailto:sjralston1@netscape.net)
21 * (mailto:mpt_linux_developer@lsil.com)
22 *
23 * $Id: mptctl.h,v 1.13 2002/12/03 21:26:33 pdelaney Exp $
24 */
25/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
26/*
27 This program is free software; you can redistribute it and/or modify
28 it under the terms of the GNU General Public License as published by
29 the Free Software Foundation; version 2 of the License.
30
31 This program is distributed in the hope that it will be useful,
32 but WITHOUT ANY WARRANTY; without even the implied warranty of
33 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 GNU General Public License for more details.
35
36 NO WARRANTY
37 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
38 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
39 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
40 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
41 solely responsible for determining the appropriateness of using and
42 distributing the Program and assumes all risks associated with its
43 exercise of rights under this Agreement, including but not limited to
44 the risks and costs of program errors, damage to or loss of data,
45 programs or equipment, and unavailability or interruption of operations.
46
47 DISCLAIMER OF LIABILITY
48 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
49 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
51 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
52 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
53 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
54 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
55
56 You should have received a copy of the GNU General Public License
57 along with this program; if not, write to the Free Software
58 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
59*/
60
61#ifndef MPTCTL_H_INCLUDED
62#define MPTCTL_H_INCLUDED
63/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
64
65#include "linux/version.h"
66
67
68/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69/*
70 *
71 */
72#define MPT_MISCDEV_BASENAME "mptctl"
73#define MPT_MISCDEV_PATHNAME "/dev/" MPT_MISCDEV_BASENAME
74
75#define MPT_PRODUCT_LENGTH 12
76
77/*
78 * Generic MPT Control IOCTLs and structures
79 */
80#define MPT_MAGIC_NUMBER 'm'
81
82#define MPTRWPERF _IOWR(MPT_MAGIC_NUMBER,0,struct mpt_raw_r_w)
83
84#define MPTFWDOWNLOAD _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer)
85#define MPTCOMMAND _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command)
86
87#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
88#define MPTFWDOWNLOAD32 _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer32)
89#define MPTCOMMAND32 _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command32)
90#endif
91
92#define MPTIOCINFO _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo)
93#define MPTIOCINFO1 _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo_rev0)
94#define MPTIOCINFO2 _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo_rev1)
95#define MPTTARGETINFO _IOWR(MPT_MAGIC_NUMBER,18,struct mpt_ioctl_targetinfo)
96#define MPTTEST _IOWR(MPT_MAGIC_NUMBER,19,struct mpt_ioctl_test)
97#define MPTEVENTQUERY _IOWR(MPT_MAGIC_NUMBER,21,struct mpt_ioctl_eventquery)
98#define MPTEVENTENABLE _IOWR(MPT_MAGIC_NUMBER,22,struct mpt_ioctl_eventenable)
99#define MPTEVENTREPORT _IOWR(MPT_MAGIC_NUMBER,23,struct mpt_ioctl_eventreport)
100#define MPTHARDRESET _IOWR(MPT_MAGIC_NUMBER,24,struct mpt_ioctl_diag_reset)
101#define MPTFWREPLACE _IOWR(MPT_MAGIC_NUMBER,25,struct mpt_ioctl_replace_fw)
102
103/*
104 * SPARC PLATFORM REMARKS:
105 * IOCTL data structures that contain pointers
106 * will have different sizes in the driver and applications
107 * (as the app. will not use 8-byte pointers).
108 * Apps should use MPTFWDOWNLOAD and MPTCOMMAND.
109 * The driver will convert data from
110 * mpt_fw_xfer32 (mpt_ioctl_command32) to mpt_fw_xfer (mpt_ioctl_command)
111 * internally.
112 *
113 * If data structures change size, must handle as in IOCGETINFO.
114 */
115struct mpt_fw_xfer {
116 unsigned int iocnum; /* IOC unit number */
117 unsigned int fwlen;
118 void __user *bufp; /* Pointer to firmware buffer */
119};
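/* (Editorial sketch, not part of the original header: a user-space firmware
 * download through this interface could look roughly like the following,
 * assuming "image" points to image_len bytes of new firmware and "fd" is an
 * open descriptor on /dev/mptctl:
 *
 *	struct mpt_fw_xfer fw;
 *
 *	fw.iocnum = 0;			(first IOC in the system)
 *	fw.fwlen  = image_len;
 *	fw.bufp   = image;
 *	if (ioctl(fd, MPTFWDOWNLOAD, &fw) < 0)
 *		perror("MPTFWDOWNLOAD");
 * )
 */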
120
121#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
122struct mpt_fw_xfer32 {
123 unsigned int iocnum;
124 unsigned int fwlen;
125 u32 bufp;
126};
127#endif /*}*/
128
129/*
130 * IOCTL header structure.
131 * iocnum - must be defined.
132 * port - must be defined for all IOCTL commands other than MPTIOCINFO
133 * maxDataSize - ignored on MPTCOMMAND commands
134 * - ignored on MPTFWREPLACE commands
135 * - on query commands, reports the maximum number of bytes to be returned
136 * to the host driver (count includes the header).
137 * That is, set to sizeof(struct mpt_ioctl_iocinfo) for fixed sized commands.
138 * Set to sizeof(struct mpt_ioctl_targetinfo) + datasize for variable
139 * sized commands. (MPTTARGETINFO, MPTEVENTREPORT)
140 */
141typedef struct _mpt_ioctl_header {
142 unsigned int iocnum; /* IOC unit number */
143 unsigned int port; /* IOC port number */
144 int maxDataSize; /* Maximum Num. bytes to transfer on read */
145} mpt_ioctl_header;
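/* (Editorial example, not part of the original header: following the
 * convention above, a fixed-size query such as MPTIOCINFO would set
 *
 *	hdr.iocnum      = 0;
 *	hdr.port        = 0;
 *	hdr.maxDataSize = sizeof(struct mpt_ioctl_iocinfo);
 *
 * while a variable-sized query such as MPTTARGETINFO adds the extra
 * payload bytes it can accept on top of the structure size.)
 */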
146
147/*
148 * Issue a diagnostic reset
149 */
150struct mpt_ioctl_diag_reset {
151 mpt_ioctl_header hdr;
152};
153
154
155/*
156 * PCI bus/device/function information structure.
157 */
158struct mpt_ioctl_pci_info {
159 union {
160 struct {
161 unsigned int deviceNumber : 5;
162 unsigned int functionNumber : 3;
163 unsigned int busNumber : 24;
164 } bits;
165 unsigned int asUlong;
166 } u;
167};
168
169struct mpt_ioctl_pci_info2 {
170 union {
171 struct {
172 unsigned int deviceNumber : 5;
173 unsigned int functionNumber : 3;
174 unsigned int busNumber : 24;
175 } bits;
176 unsigned int asUlong;
177 } u;
178 int segmentID;
179};
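/* (Illustrative note, not in the original header: for an IOC at PCI address
 * 0000:02:04.1 the driver reports busNumber=2, deviceNumber=4,
 * functionNumber=1 and, in this rev-2 structure, segmentID=0.)
 */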
180
181/*
182 * Adapter Information Page
183 * Read only.
184 * Data starts at offset 0xC
185 */
186#define MPT_IOCTL_INTERFACE_FC (0x01)
187#define MPT_IOCTL_INTERFACE_SCSI (0x00)
188#define MPT_IOCTL_VERSION_LENGTH (32)
189
190struct mpt_ioctl_iocinfo {
191 mpt_ioctl_header hdr;
192 int adapterType; /* SCSI or FCP */
193 int port; /* port number */
194 int pciId; /* PCI Id. */
195 int hwRev; /* hardware revision */
196 int subSystemDevice; /* PCI subsystem Device ID */
197 int subSystemVendor; /* PCI subsystem Vendor ID */
198 int numDevices; /* number of devices */
199 int FWVersion; /* FW Version (integer) */
200 int BIOSVersion; /* BIOS Version (integer) */
201 char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */
202 char busChangeEvent;
203 char hostId;
204 char rsvd[2];
205 struct mpt_ioctl_pci_info2 pciInfo; /* Added Rev 2 */
206};
207
208struct mpt_ioctl_iocinfo_rev1 {
209 mpt_ioctl_header hdr;
210 int adapterType; /* SCSI or FCP */
211 int port; /* port number */
212 int pciId; /* PCI Id. */
213 int hwRev; /* hardware revision */
214 int subSystemDevice; /* PCI subsystem Device ID */
215 int subSystemVendor; /* PCI subsystem Vendor ID */
216 int numDevices; /* number of devices */
217 int FWVersion; /* FW Version (integer) */
218 int BIOSVersion; /* BIOS Version (integer) */
219 char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */
220 char busChangeEvent;
221 char hostId;
222 char rsvd[2];
223 struct mpt_ioctl_pci_info pciInfo; /* Added Rev 1 */
224};
225
226/* Original structure, must always accept these
227 * IOCTLs. 4 byte pads can occur based on arch with
228 * above structure. Wish to re-align, but cannot.
229 */
230struct mpt_ioctl_iocinfo_rev0 {
231 mpt_ioctl_header hdr;
232 int adapterType; /* SCSI or FCP */
233 int port; /* port number */
234 int pciId; /* PCI Id. */
235 int hwRev; /* hardware revision */
236 int subSystemDevice; /* PCI subsystem Device ID */
237 int subSystemVendor; /* PCI subsystem Vendor ID */
238 int numDevices; /* number of devices */
239 int FWVersion; /* FW Version (integer) */
240 int BIOSVersion; /* BIOS Version (integer) */
241 char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */
242 char busChangeEvent;
243 char hostId;
244 char rsvd[2];
245};
246
247/*
248 * Device Information Page
249 * Report the number of, and ids of, all targets
250 * on this IOC. The ids array is a packed structure
251 * of the known targetInfo.
252 * bits 31-24: reserved
253 * 23-16: LUN
254 * 15- 8: Bus Number
255 * 7- 0: Target ID
256 */
257struct mpt_ioctl_targetinfo {
258 mpt_ioctl_header hdr;
259 int numDevices; /* Num targets on this ioc */
260 int targetInfo[1];
261};
262
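/* For illustration only: a hypothetical helper decoding one packed
 * targetInfo word according to the bit layout documented above.
 */
static inline void mpt_unpack_targetinfo(unsigned int ti, unsigned char *lun,
					 unsigned char *bus, unsigned char *target)
{
	*lun    = (ti >> 16) & 0xFF;	/* bits 23-16 */
	*bus    = (ti >>  8) & 0xFF;	/* bits 15- 8 */
	*target =  ti        & 0xFF;	/* bits  7- 0 */
}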
263
264/*
265 * Event reporting IOCTLs. These IOCTLs will
266 * use the following defines:
267 */
268struct mpt_ioctl_eventquery {
269 mpt_ioctl_header hdr;
270 unsigned short eventEntries;
271 unsigned short reserved;
272 unsigned int eventTypes;
273};
274
275struct mpt_ioctl_eventenable {
276 mpt_ioctl_header hdr;
277 unsigned int eventTypes;
278};
279
280#ifndef __KERNEL__
281typedef struct {
282 uint event;
283 uint eventContext;
284 uint data[2];
285} MPT_IOCTL_EVENTS;
286#endif
287
288struct mpt_ioctl_eventreport {
289 mpt_ioctl_header hdr;
290 MPT_IOCTL_EVENTS eventData[1];
291};
292
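/* For illustration only: a hypothetical userspace sketch applying the
 * variable-size rule from the IOCTL header comment to MPTEVENTREPORT;
 * maxDataSize advertises the header plus room for nevents entries. The
 * MPTEVENTREPORT request code itself is assumed to be defined elsewhere
 * in this interface.
 */
#include <stdlib.h>

static struct mpt_ioctl_eventreport *alloc_event_report(unsigned int iocnum,
							 unsigned int nevents)
{
	/* eventData[1] already accounts for one entry; nevents must be >= 1 */
	size_t len = sizeof(struct mpt_ioctl_eventreport) +
		     (nevents - 1) * sizeof(MPT_IOCTL_EVENTS);
	struct mpt_ioctl_eventreport *rep = calloc(1, len);

	if (!rep)
		return NULL;
	rep->hdr.iocnum = iocnum;
	rep->hdr.maxDataSize = (int) len;	/* header + variable part */
	return rep;
}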
293#define MPT_MAX_NAME 32
294struct mpt_ioctl_test {
295 mpt_ioctl_header hdr;
296 u8 name[MPT_MAX_NAME];
297 int chip_type;
298 u8 product [MPT_PRODUCT_LENGTH];
299};
300
301/* Replace the FW image cached in host driver memory
302 * newImageSize - image size in bytes
303 * newImage - first byte of the new image
304 */
305typedef struct mpt_ioctl_replace_fw {
306 mpt_ioctl_header hdr;
307 int newImageSize;
308 u8 newImage[1];
309} mpt_ioctl_replace_fw_t;
310
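/* For illustration only: a hypothetical userspace sketch staging a new
 * image for the cached-firmware replacement described above. Per the
 * IOCTL header comment, maxDataSize is ignored for MPTFWREPLACE.
 */
#include <stdlib.h>
#include <string.h>

static mpt_ioctl_replace_fw_t *build_replace_fw(unsigned int iocnum,
						const void *image, int image_len)
{
	/* newImage[1] already accounts for one byte of the image */
	mpt_ioctl_replace_fw_t *req = calloc(1, sizeof(*req) + image_len - 1);

	if (!req)
		return NULL;
	req->hdr.iocnum = iocnum;
	req->newImageSize = image_len;
	memcpy(req->newImage, image, image_len);
	return req;
}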
311/* General MPT Pass through data structure
312 *
313 * iocnum
314 * timeout - in seconds, command timeout. If 0, set by driver to
315 * default value.
316 * replyFrameBufPtr - reply location
317 * dataInBufPtr - destination for read
318 * dataOutBufPtr - data source for write
319 * senseDataPtr - sense data location
320 * maxReplyBytes - maximum number of reply bytes to be sent to app.
321 * dataInSize - num bytes for data transfer in (read)
322 * dataOutSize - num bytes for data transfer out (write)
323 * dataSgeOffset - offset in words from the start of the request message
324 * to the first SGL
325 * MF[1];
326 *
327 * Remark: Some config pages have bi-directional transfer,
328 * both a read and a write. The basic structure allows for
329 * a bidirectional set up. Normal messages will have one or
330 * both of these buffers NULL.
331 */
332struct mpt_ioctl_command {
333 mpt_ioctl_header hdr;
334 int timeout; /* optional (seconds) */
335 char __user *replyFrameBufPtr;
336 char __user *dataInBufPtr;
337 char __user *dataOutBufPtr;
338 char __user *senseDataPtr;
339 int maxReplyBytes;
340 int dataInSize;
341 int dataOutSize;
342 int maxSenseBytes;
343 int dataSgeOffset;
344 char MF[1];
345};
346
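/* For illustration only: a hypothetical userspace sketch filling in the
 * pass-through structure above for a read-direction request. The MPI
 * request layout and the MPTCOMMAND request code come from the rest of
 * the MPT interface and are assumptions here; the dataSgeOffset shown
 * presumes the SGE slot immediately follows the fixed part of the request,
 * and __user is assumed to expand to nothing in a userspace build.
 */
#include <stdlib.h>
#include <string.h>

static struct mpt_ioctl_command *build_passthru(unsigned int iocnum,
		const void *mpi_req, int mpi_req_len,	/* fixed part, no SGE */
		char *data_in, int data_in_len,		/* read destination */
		char *reply_buf, int reply_len)
{
	struct mpt_ioctl_command *cmd =
		calloc(1, sizeof(*cmd) + mpi_req_len - 1);	/* MF[1] holds the request */

	if (!cmd)
		return NULL;

	cmd->hdr.iocnum = iocnum;
	cmd->timeout = 0;			/* 0 => driver default */
	cmd->replyFrameBufPtr = reply_buf;
	cmd->maxReplyBytes = reply_len;
	cmd->dataInBufPtr = data_in;
	cmd->dataInSize = data_in_len;
	cmd->dataOutBufPtr = NULL;		/* no write-direction data */
	cmd->dataOutSize = 0;
	/* senseDataPtr/maxSenseBytes left zero: no sense buffer in this sketch */
	cmd->dataSgeOffset = mpi_req_len / 4;	/* 32-bit words from start of MF */
	memcpy(cmd->MF, mpi_req, mpi_req_len);
	return cmd;
}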
347/*
348 * SPARC PLATFORM: See earlier remark.
349 */
350#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
351struct mpt_ioctl_command32 {
352 mpt_ioctl_header hdr;
353 int timeout;
354 u32 replyFrameBufPtr;
355 u32 dataInBufPtr;
356 u32 dataOutBufPtr;
357 u32 senseDataPtr;
358 int maxReplyBytes;
359 int dataInSize;
360 int dataOutSize;
361 int maxSenseBytes;
362 int dataSgeOffset;
363 char MF[1];
364};
365#endif /*}*/
366
367
368/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
369/*
370 * HP Specific IOCTL Defines and Structures
371 */
372
373#define CPQFCTS_IOC_MAGIC 'Z'
374#define HP_IOC_MAGIC 'Z'
375#define HP_GETHOSTINFO _IOR(HP_IOC_MAGIC, 20, hp_host_info_t)
376#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t)
377#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t)
378
379/* All HP IOCTLs must include this header
380 */
381typedef struct _hp_header {
382 unsigned int iocnum;
383 unsigned int host;
384 unsigned int channel;
385 unsigned int id;
386 unsigned int lun;
387} hp_header_t;
388
389/*
390 * Header:
391 * iocnum required (input)
392 * host ignored
393 * channel ignored
394 * id ignored
395 * lun ignored
396 */
397typedef struct _hp_host_info {
398 hp_header_t hdr;
399 u16 vendor;
400 u16 device;
401 u16 subsystem_vendor;
402 u16 subsystem_id;
403 u8 devfn;
404 u8 bus;
405 ushort host_no; /* SCSI Host number, if scsi driver not loaded*/
406 u8 fw_version[16]; /* string */
407 u8 serial_number[24]; /* string */
408 u32 ioc_status;
409 u32 bus_phys_width;
410 u32 base_io_addr;
411 u32 rsvd;
412 unsigned int hard_resets; /* driver initiated resets */
413 unsigned int soft_resets; /* ioc, external resets */
414 unsigned int timeouts; /* num timeouts */
415} hp_host_info_t;
416
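/* For illustration only: a hypothetical userspace sketch of the
 * HP_GETHOSTINFO query defined above. Per the preceding header comment,
 * only iocnum is meaningful for this call; how fd was opened is outside
 * this header and assumed.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int hp_query_host(int fd, unsigned int iocnum)
{
	hp_host_info_t hi;

	memset(&hi, 0, sizeof(hi));
	hi.hdr.iocnum = iocnum;		/* host/channel/id/lun are ignored */

	if (ioctl(fd, HP_GETHOSTINFO, &hi) != 0)
		return -1;

	printf("PCI %04x:%04x, FW %.16s, serial %.24s\n",
	       hi.vendor, hi.device,
	       (const char *) hi.fw_version, (const char *) hi.serial_number);
	return 0;
}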
417/* Rev 0 layout: the ulongs below were later replaced with uints, but
418 * this structure is kept to preserve backwards compatibility.
419 */
420typedef struct _hp_host_info_rev0 {
421 hp_header_t hdr;
422 u16 vendor;
423 u16 device;
424 u16 subsystem_vendor;
425 u16 subsystem_id;
426 u8 devfn;
427 u8 bus;
428 ushort host_no; /* SCSI Host number, if scsi driver not loaded*/
429 u8 fw_version[16]; /* string */
430 u8 serial_number[24]; /* string */
431 u32 ioc_status;
432 u32 bus_phys_width;
433 u32 base_io_addr;
434 u32 rsvd;
435 unsigned long hard_resets; /* driver initiated resets */
436 unsigned long soft_resets; /* ioc, external resets */
437 unsigned long timeouts; /* num timeouts */
438} hp_host_info_rev0_t;
439
440/*
441 * Header:
442 * iocnum required (input)
443 * host required
444 * channel required (bus number)
445 * id required
446 * lun ignored
447 *
448 * All error counters are values between 0 and 0xFFFF.
449 */
450typedef struct _hp_target_info {
451 hp_header_t hdr;
452 u32 parity_errors;
453 u32 phase_errors;
454 u32 select_timeouts;
455 u32 message_rejects;
456 u32 negotiated_speed;
457 u8 negotiated_width;
458 u8 rsvd[7]; /* 8 byte alignment */
459} hp_target_info_t;
460
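/* For illustration only: a hypothetical userspace sketch of the
 * HP_GETTARGETINFO query. Per the header comment above, iocnum, host,
 * channel (bus number) and id must all be filled in; lun is ignored.
 */
#include <string.h>
#include <sys/ioctl.h>

static int hp_query_target(int fd, unsigned int iocnum, unsigned int host,
			   unsigned int bus, unsigned int id,
			   hp_target_info_t *ti)
{
	memset(ti, 0, sizeof(*ti));
	ti->hdr.iocnum = iocnum;	/* required */
	ti->hdr.host = host;		/* required */
	ti->hdr.channel = bus;		/* required (bus number) */
	ti->hdr.id = id;		/* required */

	return ioctl(fd, HP_GETTARGETINFO, ti);
}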
461#define HP_STATUS_OTHER 1
462#define HP_STATUS_OK 2
463#define HP_STATUS_FAILED 3
464
465#define HP_BUS_WIDTH_UNK 1
466#define HP_BUS_WIDTH_8 2
467#define HP_BUS_WIDTH_16 3
468#define HP_BUS_WIDTH_32 4
469
470#define HP_DEV_SPEED_ASYNC 2
471#define HP_DEV_SPEED_FAST 3
472#define HP_DEV_SPEED_ULTRA 4
473#define HP_DEV_SPEED_ULTRA2 5
474#define HP_DEV_SPEED_ULTRA160 6
475#define HP_DEV_SPEED_SCSI1 7
476#define HP_DEV_SPEED_ULTRA320 8
477
478/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
479
480
481/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
482
483#endif
484
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
new file mode 100644
index 000000000000..ef2713b93fab
--- /dev/null
+++ b/drivers/message/fusion/mptlan.c
@@ -0,0 +1,1688 @@
1/*
2 * linux/drivers/message/fusion/mptlan.c
3 * IP Over Fibre Channel device driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7 *
8 * Credits:
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
11 *
12 * Special thanks goes to the I2O LAN driver people at the
13 * University of Helsinki, who, unbeknownst to them, provided
14 * the inspiration and initial structure for this driver.
15 *
16 * A huge debt of gratitude is owed to David S. Miller (DaveM)
17 * for fixing much of the stupid and broken stuff in the early
18 * driver while porting to the sparc64 platform. THANK YOU!
19 *
20 * A really huge debt of gratitude is owed to Eddie C. Dost
21 * for gobs of hard work fixing and optimizing LAN code.
22 * THANK YOU!
23 *
24 * (see also mptbase.c)
25 *
26 * Copyright (c) 2000-2004 LSI Logic Corporation
27 * Originally By: Noah Romer
28 * (mailto:mpt_linux_developer@lsil.com)
29 *
30 * $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
31 */
32/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
33/*
34 This program is free software; you can redistribute it and/or modify
35 it under the terms of the GNU General Public License as published by
36 the Free Software Foundation; version 2 of the License.
37
38 This program is distributed in the hope that it will be useful,
39 but WITHOUT ANY WARRANTY; without even the implied warranty of
40 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 GNU General Public License for more details.
42
43 NO WARRANTY
44 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
45 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
46 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
47 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
48 solely responsible for determining the appropriateness of using and
49 distributing the Program and assumes all risks associated with its
50 exercise of rights under this Agreement, including but not limited to
51 the risks and costs of program errors, damage to or loss of data,
52 programs or equipment, and unavailability or interruption of operations.
53
54 DISCLAIMER OF LIABILITY
55 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
58 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
59 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
60 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
61 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
62
63 You should have received a copy of the GNU General Public License
64 along with this program; if not, write to the Free Software
65 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
66*/
67
68/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69/*
70 * Define statements used for debugging
71 */
72//#define MPT_LAN_IO_DEBUG
73
74/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
75
76#include "mptlan.h"
77#include <linux/init.h>
78#include <linux/module.h>
79#include <linux/fs.h>
80
81#define MYNAM "mptlan"
82
83MODULE_LICENSE("GPL");
84
85/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
86/*
87 * MPT LAN message sizes without variable part.
88 */
89#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
90 (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
91
92#define MPT_LAN_TRANSACTION32_SIZE \
93 (sizeof(SGETransaction32_t) - sizeof(u32))
94
95/*
96 * Fusion MPT LAN private structures
97 */
98
99struct NAA_Hosed {
100 u16 NAA;
101 u8 ieee[FC_ALEN];
102 struct NAA_Hosed *next;
103};
104
105struct BufferControl {
106 struct sk_buff *skb;
107 dma_addr_t dma;
108 unsigned int len;
109};
110
111struct mpt_lan_priv {
112 MPT_ADAPTER *mpt_dev;
113 u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
114
115 atomic_t buckets_out; /* number of unused buckets on IOC */
116 int bucketthresh; /* Send more when this many left */
117
118 int *mpt_txfidx; /* Free Tx Context list */
119 int mpt_txfidx_tail;
120 spinlock_t txfidx_lock;
121
122 int *mpt_rxfidx; /* Free Rx Context list */
123 int mpt_rxfidx_tail;
124 spinlock_t rxfidx_lock;
125
126 struct BufferControl *RcvCtl; /* Receive BufferControl structs */
127 struct BufferControl *SendCtl; /* Send BufferControl structs */
128
129 int max_buckets_out; /* Max buckets to send to IOC */
130 int tx_max_out; /* IOC's Tx queue len */
131
132 u32 total_posted;
133 u32 total_received;
134 struct net_device_stats stats; /* Per device statistics */
135
136 struct work_struct post_buckets_task;
137 unsigned long post_buckets_active;
138};
139
140struct mpt_lan_ohdr {
141 u16 dtype;
142 u8 daddr[FC_ALEN];
143 u16 stype;
144 u8 saddr[FC_ALEN];
145};
146
147/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
148
149/*
150 * Forward protos...
151 */
152static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
153 MPT_FRAME_HDR *reply);
154static int mpt_lan_open(struct net_device *dev);
155static int mpt_lan_reset(struct net_device *dev);
156static int mpt_lan_close(struct net_device *dev);
157static void mpt_lan_post_receive_buckets(void *dev_id);
158static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
159 int priority);
160static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
161static int mpt_lan_receive_post_reply(struct net_device *dev,
162 LANReceivePostReply_t *pRecvRep);
163static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
164static int mpt_lan_send_reply(struct net_device *dev,
165 LANSendReply_t *pSendRep);
166static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
167static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
168static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
169 struct net_device *dev);
170
171/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
172/*
173 * Fusion MPT LAN private data
174 */
175static int LanCtx = -1;
176
177static u32 max_buckets_out = 127;
178static u32 tx_max_out_p = 127 - 16;
179
180#ifdef QLOGIC_NAA_WORKAROUND
181static struct NAA_Hosed *mpt_bad_naa = NULL;
182DEFINE_RWLOCK(bad_naa_lock);
183#endif
184
185/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
186/*
187 * Fusion MPT LAN external data
188 */
189extern int mpt_lan_index;
190
191/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
192/**
193 * lan_reply - Handle all data sent from the hardware.
194 * @ioc: Pointer to MPT_ADAPTER structure
195 * @mf: Pointer to original MPT request frame (NULL if TurboReply)
196 * @reply: Pointer to MPT reply frame
197 *
198 * Returns 1 indicating original alloc'd request frame ptr
199 * should be freed, or 0 if it shouldn't.
200 */
201static int
202lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
203{
204 struct net_device *dev = ioc->netdev;
205 int FreeReqFrame = 0;
206
207 dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
208 IOC_AND_NETDEV_NAMES_s_s(dev)));
209
210// dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
211// mf, reply));
212
213 if (mf == NULL) {
214 u32 tmsg = CAST_PTR_TO_U32(reply);
215
216 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
217 IOC_AND_NETDEV_NAMES_s_s(dev),
218 tmsg));
219
220 switch (GET_LAN_FORM(tmsg)) {
221
222 // NOTE! (Optimization) First case here is now caught in
223 // mptbase.c::mpt_interrupt() routine and callback here
224 // is now skipped for this case! 20001218 -sralston
225#if 0
226 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
227// dioprintk((KERN_INFO MYNAM "/lan_reply: "
228// "MessageContext turbo reply received\n"));
229 FreeReqFrame = 1;
230 break;
231#endif
232
233 case LAN_REPLY_FORM_SEND_SINGLE:
234// dioprintk((MYNAM "/lan_reply: "
235// "calling mpt_lan_send_reply (turbo)\n"));
236
237 // Potential BUG here? -sralston
238 // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
239 // If/when mpt_lan_send_turbo would return 1 here,
240 // calling routine (mptbase.c|mpt_interrupt)
241 // would Oops because mf has already been set
242 // to NULL. So after return from this func,
243 // mpt_interrupt() will attempt to put (NULL) mf ptr
244 // item back onto its adapter FreeQ - Oops!:-(
245 // It's Ok, since mpt_lan_send_turbo() *currently*
246 // always returns 0, but..., just in case:
247
248 (void) mpt_lan_send_turbo(dev, tmsg);
249 FreeReqFrame = 0;
250
251 break;
252
253 case LAN_REPLY_FORM_RECEIVE_SINGLE:
254// dioprintk((KERN_INFO MYNAM "@lan_reply: "
255// "rcv-Turbo = %08x\n", tmsg));
256 mpt_lan_receive_post_turbo(dev, tmsg);
257 break;
258
259 default:
260 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
261 "that I don't know what to do with\n");
262
263 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
264
265 break;
266 }
267
268 return FreeReqFrame;
269 }
270
271// msg = (u32 *) reply;
272// dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
273// le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
274// le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
275// dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
276// reply->u.hdr.Function));
277
278 switch (reply->u.hdr.Function) {
279
280 case MPI_FUNCTION_LAN_SEND:
281 {
282 LANSendReply_t *pSendRep;
283
284 pSendRep = (LANSendReply_t *) reply;
285 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
286 break;
287 }
288
289 case MPI_FUNCTION_LAN_RECEIVE:
290 {
291 LANReceivePostReply_t *pRecvRep;
292
293 pRecvRep = (LANReceivePostReply_t *) reply;
294 if (pRecvRep->NumberOfContexts) {
295 mpt_lan_receive_post_reply(dev, pRecvRep);
296 if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
297 FreeReqFrame = 1;
298 } else
299 dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
300 "ReceivePostReply received.\n"));
301 break;
302 }
303
304 case MPI_FUNCTION_LAN_RESET:
305 /* Just a default reply. Might want to check it to
306 * make sure that everything went ok.
307 */
308 FreeReqFrame = 1;
309 break;
310
311 case MPI_FUNCTION_EVENT_NOTIFICATION:
312 case MPI_FUNCTION_EVENT_ACK:
313 /* UPDATE! 20010120 -sralston
314 * _EVENT_NOTIFICATION should NOT come down this path any more.
315 * Should be routed to mpt_lan_event_process(), but just in case...
316 */
317 FreeReqFrame = 1;
318 break;
319
320 default:
321 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
322 "reply that I don't know what to do with\n");
323
324 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
325 FreeReqFrame = 1;
326
327 break;
328 }
329
330 return FreeReqFrame;
331}
332
333/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
334static int
335mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
336{
337 struct net_device *dev = ioc->netdev;
338 struct mpt_lan_priv *priv = netdev_priv(dev);
339
340 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
341 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
342 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
343
344 if (priv->mpt_rxfidx == NULL)
345 return (1);
346
347 if (reset_phase == MPT_IOC_SETUP_RESET) {
348 ;
349 } else if (reset_phase == MPT_IOC_PRE_RESET) {
350 int i;
351 unsigned long flags;
352
353 netif_stop_queue(dev);
354
355 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
356
357 atomic_set(&priv->buckets_out, 0);
358
359 /* Reset Rx Free Tail index and re-populate the queue. */
360 spin_lock_irqsave(&priv->rxfidx_lock, flags);
361 priv->mpt_rxfidx_tail = -1;
362 for (i = 0; i < priv->max_buckets_out; i++)
363 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
364 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
365 } else {
366 mpt_lan_post_receive_buckets(dev);
367 netif_wake_queue(dev);
368 }
369
370 return 1;
371}
372
373/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
374static int
375mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
376{
377 dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
378
379 switch (le32_to_cpu(pEvReply->Event)) {
380 case MPI_EVENT_NONE: /* 00 */
381 case MPI_EVENT_LOG_DATA: /* 01 */
382 case MPI_EVENT_STATE_CHANGE: /* 02 */
383 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
384 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
385 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
386 case MPI_EVENT_RESCAN: /* 06 */
387 /* Ok, do we need to do anything here? As far as
388 I can tell, this is when a new device gets added
389 to the loop. */
390 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
391 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
392 case MPI_EVENT_LOGOUT: /* 09 */
393 case MPI_EVENT_EVENT_CHANGE: /* 0A */
394 default:
395 break;
396 }
397
398 /*
399 * NOTE: pEvent->AckRequired handling now done in mptbase.c;
400 * Do NOT do it here now!
401 */
402
403 return 1;
404}
405
406/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
407static int
408mpt_lan_open(struct net_device *dev)
409{
410 struct mpt_lan_priv *priv = netdev_priv(dev);
411 int i;
412
413 if (mpt_lan_reset(dev) != 0) {
414 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
415
416 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
417
418 if (mpt_dev->active)
419 printk ("The ioc is active. Perhaps it needs to be"
420 " reset?\n");
421 else
422 printk ("The ioc is inactive, most likely in the "
423 "process of being reset. Please try again in "
424 "a moment.\n");
425 }
426
427 priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
428 if (priv->mpt_txfidx == NULL)
429 goto out;
430 priv->mpt_txfidx_tail = -1;
431
432 priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
433 GFP_KERNEL);
434 if (priv->SendCtl == NULL)
435 goto out_mpt_txfidx;
436 for (i = 0; i < priv->tx_max_out; i++) {
437 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
438 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
439 }
440
441 dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
442
443 priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
444 GFP_KERNEL);
445 if (priv->mpt_rxfidx == NULL)
446 goto out_SendCtl;
447 priv->mpt_rxfidx_tail = -1;
448
449 priv->RcvCtl = kmalloc(priv->max_buckets_out *
450 sizeof(struct BufferControl),
451 GFP_KERNEL);
452 if (priv->RcvCtl == NULL)
453 goto out_mpt_rxfidx;
454 for (i = 0; i < priv->max_buckets_out; i++) {
455 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
456 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
457 }
458
459/**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
460/**/ for (i = 0; i < priv->tx_max_out; i++)
461/**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
462/**/ dlprintk(("\n"));
463
464 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
465
466 mpt_lan_post_receive_buckets(dev);
467 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
468 IOC_AND_NETDEV_NAMES_s_s(dev));
469
470 if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
471 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
472 " Notifications. This is a bad thing! We're going "
473 "to continue anyway, but I'd be leery of system stability at "
474 "this point.\n");
475 }
476
477 netif_start_queue(dev);
478 dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
479
480 return 0;
481out_mpt_rxfidx:
482 kfree(priv->mpt_rxfidx);
483 priv->mpt_rxfidx = NULL;
484out_SendCtl:
485 kfree(priv->SendCtl);
486 priv->SendCtl = NULL;
487out_mpt_txfidx:
488 kfree(priv->mpt_txfidx);
489 priv->mpt_txfidx = NULL;
490out: return -ENOMEM;
491}
492
493/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
494/* Send a LanReset message to the FW. This should result in the FW returning
495 any buckets it still has. */
496static int
497mpt_lan_reset(struct net_device *dev)
498{
499 MPT_FRAME_HDR *mf;
500 LANResetRequest_t *pResetReq;
501 struct mpt_lan_priv *priv = netdev_priv(dev);
502
503 mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
504
505 if (mf == NULL) {
506/* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
507 "Unable to allocate a request frame.\n"));
508*/
509 return -1;
510 }
511
512 pResetReq = (LANResetRequest_t *) mf;
513
514 pResetReq->Function = MPI_FUNCTION_LAN_RESET;
515 pResetReq->ChainOffset = 0;
516 pResetReq->Reserved = 0;
517 pResetReq->PortNumber = priv->pnum;
518 pResetReq->MsgFlags = 0;
519 pResetReq->Reserved2 = 0;
520
521 mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
522
523 return 0;
524}
525
526/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
527static int
528mpt_lan_close(struct net_device *dev)
529{
530 struct mpt_lan_priv *priv = netdev_priv(dev);
531 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
532 unsigned int timeout;
533 int i;
534
535 dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
536
537 mpt_event_deregister(LanCtx);
538
539 dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
540 "since driver was loaded, %d still out\n",
541 priv->total_posted,atomic_read(&priv->buckets_out)));
542
543 netif_stop_queue(dev);
544
545 mpt_lan_reset(dev);
546
547 timeout = 2 * HZ;
548 while (atomic_read(&priv->buckets_out) && --timeout) {
549 set_current_state(TASK_INTERRUPTIBLE);
550 schedule_timeout(1);
551 }
552
553 for (i = 0; i < priv->max_buckets_out; i++) {
554 if (priv->RcvCtl[i].skb != NULL) {
555/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
556/**/ "is still out\n", i));
557 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
558 priv->RcvCtl[i].len,
559 PCI_DMA_FROMDEVICE);
560 dev_kfree_skb(priv->RcvCtl[i].skb);
561 }
562 }
563
564 kfree (priv->RcvCtl);
565 kfree (priv->mpt_rxfidx);
566
567 for (i = 0; i < priv->tx_max_out; i++) {
568 if (priv->SendCtl[i].skb != NULL) {
569 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
570 priv->SendCtl[i].len,
571 PCI_DMA_TODEVICE);
572 dev_kfree_skb(priv->SendCtl[i].skb);
573 }
574 }
575
576 kfree(priv->SendCtl);
577 kfree(priv->mpt_txfidx);
578
579 atomic_set(&priv->buckets_out, 0);
580
581 printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
582 IOC_AND_NETDEV_NAMES_s_s(dev));
583
584 return 0;
585}
586
587/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
588static struct net_device_stats *
589mpt_lan_get_stats(struct net_device *dev)
590{
591 struct mpt_lan_priv *priv = netdev_priv(dev);
592
593 return (struct net_device_stats *) &priv->stats;
594}
595
596/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
597static int
598mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
599{
600 if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
601 return -EINVAL;
602 dev->mtu = new_mtu;
603 return 0;
604}
605
606/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
607/* Tx timeout handler. */
608static void
609mpt_lan_tx_timeout(struct net_device *dev)
610{
611 struct mpt_lan_priv *priv = netdev_priv(dev);
612 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
613
614 if (mpt_dev->active) {
615 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
616 netif_wake_queue(dev);
617 }
618}
619
620/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
621//static inline int
622static int
623mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
624{
625 struct mpt_lan_priv *priv = netdev_priv(dev);
626 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
627 struct sk_buff *sent;
628 unsigned long flags;
629 u32 ctx;
630
631 ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
632 sent = priv->SendCtl[ctx].skb;
633
634 priv->stats.tx_packets++;
635 priv->stats.tx_bytes += sent->len;
636
637 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
638 IOC_AND_NETDEV_NAMES_s_s(dev),
639 __FUNCTION__, sent));
640
641 priv->SendCtl[ctx].skb = NULL;
642 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
643 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
644 dev_kfree_skb_irq(sent);
645
646 spin_lock_irqsave(&priv->txfidx_lock, flags);
647 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
648 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
649
650 netif_wake_queue(dev);
651 return 0;
652}
653
654/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
655static int
656mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
657{
658 struct mpt_lan_priv *priv = netdev_priv(dev);
659 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
660 struct sk_buff *sent;
661 unsigned long flags;
662 int FreeReqFrame = 0;
663 u32 *pContext;
664 u32 ctx;
665 u8 count;
666
667 count = pSendRep->NumberOfContexts;
668
669 dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
670 le16_to_cpu(pSendRep->IOCStatus)));
671
672 /* Add check for Loginfo Flag in IOCStatus */
673
674 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
675 case MPI_IOCSTATUS_SUCCESS:
676 priv->stats.tx_packets += count;
677 break;
678
679 case MPI_IOCSTATUS_LAN_CANCELED:
680 case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
681 break;
682
683 case MPI_IOCSTATUS_INVALID_SGL:
684 priv->stats.tx_errors += count;
685 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
686 IOC_AND_NETDEV_NAMES_s_s(dev));
687 goto out;
688
689 default:
690 priv->stats.tx_errors += count;
691 break;
692 }
693
694 pContext = &pSendRep->BufferContext;
695
696 spin_lock_irqsave(&priv->txfidx_lock, flags);
697 while (count > 0) {
698 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
699
700 sent = priv->SendCtl[ctx].skb;
701 priv->stats.tx_bytes += sent->len;
702
703 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
704 IOC_AND_NETDEV_NAMES_s_s(dev),
705 __FUNCTION__, sent));
706
707 priv->SendCtl[ctx].skb = NULL;
708 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
709 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
710 dev_kfree_skb_irq(sent);
711
712 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
713
714 pContext++;
715 count--;
716 }
717 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
718
719out:
720 if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
721 FreeReqFrame = 1;
722
723 netif_wake_queue(dev);
724 return FreeReqFrame;
725}
726
727/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
728static int
729mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
730{
731 struct mpt_lan_priv *priv = netdev_priv(dev);
732 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
733 MPT_FRAME_HDR *mf;
734 LANSendRequest_t *pSendReq;
735 SGETransaction32_t *pTrans;
736 SGESimple64_t *pSimple;
737 dma_addr_t dma;
738 unsigned long flags;
739 int ctx;
740 u16 cur_naa = 0x1000;
741
742 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
743 __FUNCTION__, skb));
744
745 spin_lock_irqsave(&priv->txfidx_lock, flags);
746 if (priv->mpt_txfidx_tail < 0) {
747 netif_stop_queue(dev);
748 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
749
750 printk (KERN_ERR "%s: no tx context available: %d\n",
751 __FUNCTION__, priv->mpt_txfidx_tail);
752 return 1;
753 }
754
755 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
756 if (mf == NULL) {
757 netif_stop_queue(dev);
758 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
759
760 printk (KERN_ERR "%s: Unable to alloc request frame\n",
761 __FUNCTION__);
762 return 1;
763 }
764
765 ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
766 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
767
768// dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
769// IOC_AND_NETDEV_NAMES_s_s(dev)));
770
771 pSendReq = (LANSendRequest_t *) mf;
772
773 /* Set the mac.raw pointer, since this apparently isn't getting
774 * done before we get the skb. Pull the data pointer past the mac data.
775 */
776 skb->mac.raw = skb->data;
777 skb_pull(skb, 12);
778
779 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
780 PCI_DMA_TODEVICE);
781
782 priv->SendCtl[ctx].skb = skb;
783 priv->SendCtl[ctx].dma = dma;
784 priv->SendCtl[ctx].len = skb->len;
785
786 /* Message Header */
787 pSendReq->Reserved = 0;
788 pSendReq->Function = MPI_FUNCTION_LAN_SEND;
789 pSendReq->ChainOffset = 0;
790 pSendReq->Reserved2 = 0;
791 pSendReq->MsgFlags = 0;
792 pSendReq->PortNumber = priv->pnum;
793
794 /* Transaction Context Element */
795 pTrans = (SGETransaction32_t *) pSendReq->SG_List;
796
797 /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
798 pTrans->ContextSize = sizeof(u32);
799 pTrans->DetailsLength = 2 * sizeof(u32);
800 pTrans->Flags = 0;
801 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
802
803// dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
804// IOC_AND_NETDEV_NAMES_s_s(dev),
805// ctx, skb, skb->data));
806
807#ifdef QLOGIC_NAA_WORKAROUND
808{
809 struct NAA_Hosed *nh;
810
811 /* Munge the NAA for Tx packets to QLogic boards, which don't follow
812 RFC 2625. The longer I look at this, the more my opinion of Qlogic
813 drops. */
814 read_lock_irq(&bad_naa_lock);
815 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
816 if ((nh->ieee[0] == skb->mac.raw[0]) &&
817 (nh->ieee[1] == skb->mac.raw[1]) &&
818 (nh->ieee[2] == skb->mac.raw[2]) &&
819 (nh->ieee[3] == skb->mac.raw[3]) &&
820 (nh->ieee[4] == skb->mac.raw[4]) &&
821 (nh->ieee[5] == skb->mac.raw[5])) {
822 cur_naa = nh->NAA;
823 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
824 "= %04x.\n", cur_naa));
825 break;
826 }
827 }
828 read_unlock_irq(&bad_naa_lock);
829}
830#endif
831
832 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
833 (skb->mac.raw[0] << 8) |
834 (skb->mac.raw[1] << 0));
835 pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
836 (skb->mac.raw[3] << 16) |
837 (skb->mac.raw[4] << 8) |
838 (skb->mac.raw[5] << 0));
839
840 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
841
842 /* If we ever decide to send more than one Simple SGE per LANSend, then
843 we will need to make sure that LAST_ELEMENT only gets set on the
844 last one. Otherwise, bad voodoo and evil funkiness will commence. */
845 pSimple->FlagsLength = cpu_to_le32(
846 ((MPI_SGE_FLAGS_LAST_ELEMENT |
847 MPI_SGE_FLAGS_END_OF_BUFFER |
848 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
849 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
850 MPI_SGE_FLAGS_HOST_TO_IOC |
851 MPI_SGE_FLAGS_64_BIT_ADDRESSING |
852 MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
853 skb->len);
854 pSimple->Address.Low = cpu_to_le32((u32) dma);
855 if (sizeof(dma_addr_t) > sizeof(u32))
856 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
857 else
858 pSimple->Address.High = 0;
859
860 mpt_put_msg_frame (LanCtx, mpt_dev, mf);
861 dev->trans_start = jiffies;
862
863 dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
864 IOC_AND_NETDEV_NAMES_s_s(dev),
865 le32_to_cpu(pSimple->FlagsLength)));
866
867 return 0;
868}
869
870/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
871static inline void
872mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
873/*
874 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
875 */
876{
877 struct mpt_lan_priv *priv = dev->priv;
878
879 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
880 if (priority) {
881 schedule_work(&priv->post_buckets_task);
882 } else {
883 schedule_delayed_work(&priv->post_buckets_task, 1);
884 dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
885 "timer.\n"));
886 }
887 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
888 IOC_AND_NETDEV_NAMES_s_s(dev) ));
889 }
890}
891
892/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
893static inline int
894mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
895{
896 struct mpt_lan_priv *priv = dev->priv;
897
898 skb->protocol = mpt_lan_type_trans(skb, dev);
899
900 dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
901 "delivered to upper level.\n",
902 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
903
904 priv->stats.rx_bytes += skb->len;
905 priv->stats.rx_packets++;
906
907 skb->dev = dev;
908 netif_rx(skb);
909
910 dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
911 atomic_read(&priv->buckets_out)));
912
913 if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
914 mpt_lan_wake_post_buckets_task(dev, 1);
915
916 dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
917 "remaining, %d received back since sod\n",
918 atomic_read(&priv->buckets_out), priv->total_received));
919
920 return 0;
921}
922
923/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
924//static inline int
925static int
926mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
927{
928 struct mpt_lan_priv *priv = dev->priv;
929 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
930 struct sk_buff *skb, *old_skb;
931 unsigned long flags;
932 u32 ctx, len;
933
934 ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
935 skb = priv->RcvCtl[ctx].skb;
936
937 len = GET_LAN_PACKET_LENGTH(tmsg);
938
939 if (len < MPT_LAN_RX_COPYBREAK) {
940 old_skb = skb;
941
942 skb = (struct sk_buff *)dev_alloc_skb(len);
943 if (!skb) {
944 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
945 IOC_AND_NETDEV_NAMES_s_s(dev),
946 __FILE__, __LINE__);
947 return -ENOMEM;
948 }
949
950 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
951 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
952
953 memcpy(skb_put(skb, len), old_skb->data, len);
954
955 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
956 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
957 goto out;
958 }
959
960 skb_put(skb, len);
961
962 priv->RcvCtl[ctx].skb = NULL;
963
964 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
965 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
966
967out:
968 spin_lock_irqsave(&priv->rxfidx_lock, flags);
969 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
970 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
971
972 atomic_dec(&priv->buckets_out);
973 priv->total_received++;
974
975 return mpt_lan_receive_skb(dev, skb);
976}
977
978/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
979static int
980mpt_lan_receive_post_free(struct net_device *dev,
981 LANReceivePostReply_t *pRecvRep)
982{
983 struct mpt_lan_priv *priv = dev->priv;
984 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
985 unsigned long flags;
986 struct sk_buff *skb;
987 u32 ctx;
988 int count;
989 int i;
990
991 count = pRecvRep->NumberOfContexts;
992
993/**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
994 "IOC returned %d buckets, freeing them...\n", count));
995
996 spin_lock_irqsave(&priv->rxfidx_lock, flags);
997 for (i = 0; i < count; i++) {
998 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
999
1000 skb = priv->RcvCtl[ctx].skb;
1001
1002// dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
1003// IOC_AND_NETDEV_NAMES_s_s(dev)));
1004// dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
1005// priv, &(priv->buckets_out)));
1006// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
1007
1008 priv->RcvCtl[ctx].skb = NULL;
1009 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1010 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1011 dev_kfree_skb_any(skb);
1012
1013 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1014 }
1015 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1016
1017 atomic_sub(count, &priv->buckets_out);
1018
1019// for (i = 0; i < priv->max_buckets_out; i++)
1020// if (priv->RcvCtl[i].skb != NULL)
1021// dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
1022// "is still out\n", i));
1023
1024/* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1025 count));
1026*/
1027/**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1028/**/ "remaining, %d received back since sod.\n",
1029/**/ atomic_read(&priv->buckets_out), priv->total_received));
1030 return 0;
1031}
1032
1033/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1034static int
1035mpt_lan_receive_post_reply(struct net_device *dev,
1036 LANReceivePostReply_t *pRecvRep)
1037{
1038 struct mpt_lan_priv *priv = dev->priv;
1039 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1040 struct sk_buff *skb, *old_skb;
1041 unsigned long flags;
1042 u32 len, ctx, offset;
1043 u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1044 int count;
1045 int i, l;
1046
1047 dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1048 dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1049 le16_to_cpu(pRecvRep->IOCStatus)));
1050
1051 if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1052 MPI_IOCSTATUS_LAN_CANCELED)
1053 return mpt_lan_receive_post_free(dev, pRecvRep);
1054
1055 len = le32_to_cpu(pRecvRep->PacketLength);
1056 if (len == 0) {
1057 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1058 "ReceivePostReply w/ PacketLength zero!\n",
1059 IOC_AND_NETDEV_NAMES_s_s(dev));
1060 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1061 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1062 return -1;
1063 }
1064
1065 ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
1066 count = pRecvRep->NumberOfContexts;
1067 skb = priv->RcvCtl[ctx].skb;
1068
1069 offset = le32_to_cpu(pRecvRep->PacketOffset);
1070// if (offset != 0) {
1071// printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1072// "w/ PacketOffset %u\n",
1073// IOC_AND_NETDEV_NAMES_s_s(dev),
1074// offset);
1075// }
1076
1077 dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1078 IOC_AND_NETDEV_NAMES_s_s(dev),
1079 offset, len));
1080
1081 if (count > 1) {
1082 int szrem = len;
1083
1084// dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1085// "for single packet, concatenating...\n",
1086// IOC_AND_NETDEV_NAMES_s_s(dev)));
1087
1088 skb = (struct sk_buff *)dev_alloc_skb(len);
1089 if (!skb) {
1090 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1091 IOC_AND_NETDEV_NAMES_s_s(dev),
1092 __FILE__, __LINE__);
1093 return -ENOMEM;
1094 }
1095
1096 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1097 for (i = 0; i < count; i++) {
1098
1099 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1100 old_skb = priv->RcvCtl[ctx].skb;
1101
1102 l = priv->RcvCtl[ctx].len;
1103 if (szrem < l)
1104 l = szrem;
1105
1106// dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1107// IOC_AND_NETDEV_NAMES_s_s(dev),
1108// i, l));
1109
1110 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1111 priv->RcvCtl[ctx].dma,
1112 priv->RcvCtl[ctx].len,
1113 PCI_DMA_FROMDEVICE);
1114 memcpy(skb_put(skb, l), old_skb->data, l);
1115
1116 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1117 priv->RcvCtl[ctx].dma,
1118 priv->RcvCtl[ctx].len,
1119 PCI_DMA_FROMDEVICE);
1120
1121 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1122 szrem -= l;
1123 }
1124 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1125
1126 } else if (len < MPT_LAN_RX_COPYBREAK) {
1127
1128 old_skb = skb;
1129
1130 skb = (struct sk_buff *)dev_alloc_skb(len);
1131 if (!skb) {
1132 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1133 IOC_AND_NETDEV_NAMES_s_s(dev),
1134 __FILE__, __LINE__);
1135 return -ENOMEM;
1136 }
1137
1138 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1139 priv->RcvCtl[ctx].dma,
1140 priv->RcvCtl[ctx].len,
1141 PCI_DMA_FROMDEVICE);
1142
1143 memcpy(skb_put(skb, len), old_skb->data, len);
1144
1145 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1146 priv->RcvCtl[ctx].dma,
1147 priv->RcvCtl[ctx].len,
1148 PCI_DMA_FROMDEVICE);
1149
1150 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1151 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1152 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1153
1154 } else {
1155 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1156
1157 priv->RcvCtl[ctx].skb = NULL;
1158
1159 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1160 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1161 priv->RcvCtl[ctx].dma = 0;
1162
1163 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1164 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1165
1166 skb_put(skb,len);
1167 }
1168
1169 atomic_sub(count, &priv->buckets_out);
1170 priv->total_received += count;
1171
1172 if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1173 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1174 "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1175 IOC_AND_NETDEV_NAMES_s_s(dev),
1176 priv->mpt_rxfidx_tail,
1177 MPT_LAN_MAX_BUCKETS_OUT);
1178
1179 panic("Damn it Jim! I'm a doctor, not a programmer! "
1180 "Oh, wait a sec, I am a programmer. "
1181 "And, who's Jim?!?!\n"
1182 "Arrgghh! We've done it again!\n");
1183 }
1184
1185 if (remaining == 0)
1186 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1187 "(priv->buckets_out = %d)\n",
1188 IOC_AND_NETDEV_NAMES_s_s(dev),
1189 atomic_read(&priv->buckets_out));
1190 else if (remaining < 10)
1191 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1192 "(priv->buckets_out = %d)\n",
1193 IOC_AND_NETDEV_NAMES_s_s(dev),
1194 remaining, atomic_read(&priv->buckets_out));
1195
1196 if ((remaining < priv->bucketthresh) &&
1197 ((atomic_read(&priv->buckets_out) - remaining) >
1198 MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1199
1200 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1201 "buckets_out count and fw's BucketsRemaining "
1202 "count has crossed the threshold, issuing a "
1203 "LanReset to clear the fw's hashtable. You may "
1204 "want to check your /var/log/messages for \"CRC "
1205 "error\" event notifications.\n");
1206
1207 mpt_lan_reset(dev);
1208 mpt_lan_wake_post_buckets_task(dev, 0);
1209 }
1210
1211 return mpt_lan_receive_skb(dev, skb);
1212}
1213
1214/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1215/* Simple SGEs only at the moment */
1216
1217static void
1218mpt_lan_post_receive_buckets(void *dev_id)
1219{
1220 struct net_device *dev = dev_id;
1221 struct mpt_lan_priv *priv = dev->priv;
1222 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1223 MPT_FRAME_HDR *mf;
1224 LANReceivePostRequest_t *pRecvReq;
1225 SGETransaction32_t *pTrans;
1226 SGESimple64_t *pSimple;
1227 struct sk_buff *skb;
1228 dma_addr_t dma;
1229 u32 curr, buckets, count, max;
1230 u32 len = (dev->mtu + dev->hard_header_len + 4);
1231 unsigned long flags;
1232 int i;
1233
1234 curr = atomic_read(&priv->buckets_out);
1235 buckets = (priv->max_buckets_out - curr);
1236
1237 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1238 IOC_AND_NETDEV_NAMES_s_s(dev),
1239 __FUNCTION__, buckets, curr));
1240
1241 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1242 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1243
1244 while (buckets) {
1245 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1246 if (mf == NULL) {
1247 printk (KERN_ERR "%s: Unable to alloc request frame\n",
1248 __FUNCTION__);
1249 dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1250 __FUNCTION__, buckets));
1251 goto out;
1252 }
1253 pRecvReq = (LANReceivePostRequest_t *) mf;
1254
1255 count = buckets;
1256 if (count > max)
1257 count = max;
1258
1259 pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
1260 pRecvReq->ChainOffset = 0;
1261 pRecvReq->MsgFlags = 0;
1262 pRecvReq->PortNumber = priv->pnum;
1263
1264 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1265 pSimple = NULL;
1266
1267 for (i = 0; i < count; i++) {
1268 int ctx;
1269
1270 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1271 if (priv->mpt_rxfidx_tail < 0) {
1272 printk (KERN_ERR "%s: Can't alloc context\n",
1273 __FUNCTION__);
1274 spin_unlock_irqrestore(&priv->rxfidx_lock,
1275 flags);
1276 break;
1277 }
1278
1279 ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1280
1281 skb = priv->RcvCtl[ctx].skb;
1282 if (skb && (priv->RcvCtl[ctx].len != len)) {
1283 pci_unmap_single(mpt_dev->pcidev,
1284 priv->RcvCtl[ctx].dma,
1285 priv->RcvCtl[ctx].len,
1286 PCI_DMA_FROMDEVICE);
1287 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1288 skb = priv->RcvCtl[ctx].skb = NULL;
1289 }
1290
1291 if (skb == NULL) {
1292 skb = dev_alloc_skb(len);
1293 if (skb == NULL) {
1294 printk (KERN_WARNING
1295 MYNAM "/%s: Can't alloc skb\n",
1296 __FUNCTION__);
1297 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1298 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1299 break;
1300 }
1301
1302 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1303 len, PCI_DMA_FROMDEVICE);
1304
1305 priv->RcvCtl[ctx].skb = skb;
1306 priv->RcvCtl[ctx].dma = dma;
1307 priv->RcvCtl[ctx].len = len;
1308 }
1309
1310 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1311
1312 pTrans->ContextSize = sizeof(u32);
1313 pTrans->DetailsLength = 0;
1314 pTrans->Flags = 0;
1315 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1316
1317 pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1318
1319 pSimple->FlagsLength = cpu_to_le32(
1320 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1321 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1322 MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1323 pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1324 if (sizeof(dma_addr_t) > sizeof(u32))
1325 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1326 else
1327 pSimple->Address.High = 0;
1328
1329 pTrans = (SGETransaction32_t *) (pSimple + 1);
1330 }
1331
1332 if (pSimple == NULL) {
1333/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1334/**/ __FUNCTION__);
1335 mpt_free_msg_frame(mpt_dev, mf);
1336 goto out;
1337 }
1338
1339 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1340
1341 pRecvReq->BucketCount = cpu_to_le32(i);
1342
1343/* printk(KERN_INFO MYNAM ": posting buckets\n ");
1344 * for (i = 0; i < j + 2; i ++)
1345 * printk (" %08x", le32_to_cpu(msg[i]));
1346 * printk ("\n");
1347 */
1348
1349 mpt_put_msg_frame(LanCtx, mpt_dev, mf);
1350
1351 priv->total_posted += i;
1352 buckets -= i;
1353 atomic_add(i, &priv->buckets_out);
1354 }
1355
1356out:
1357 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1358 __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1359 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1360 __FUNCTION__, priv->total_posted, priv->total_received));
1361
1362 clear_bit(0, &priv->post_buckets_active);
1363}
1364
1365/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1366static struct net_device *
1367mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1368{
1369 struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1370 struct mpt_lan_priv *priv = NULL;
1371 u8 HWaddr[FC_ALEN], *a;
1372
1373 if (!dev)
1374 return NULL;
1375
1376 dev->mtu = MPT_LAN_MTU;
1377
1378 priv = netdev_priv(dev);
1379
1380 priv->mpt_dev = mpt_dev;
1381 priv->pnum = pnum;
1382
1383 memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
1384 INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1385 priv->post_buckets_active = 0;
1386
1387 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1388 __LINE__, dev->mtu + dev->hard_header_len + 4));
1389
1390 atomic_set(&priv->buckets_out, 0);
1391 priv->total_posted = 0;
1392 priv->total_received = 0;
1393 priv->max_buckets_out = max_buckets_out;
1394 if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1395 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1396
1397 dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1398 __LINE__,
1399 mpt_dev->pfacts[0].MaxLanBuckets,
1400 max_buckets_out,
1401 priv->max_buckets_out));
1402
1403 priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1404 spin_lock_init(&priv->txfidx_lock);
1405 spin_lock_init(&priv->rxfidx_lock);
1406
1407 memset(&priv->stats, 0, sizeof(priv->stats));
1408
1409 /* Grab pre-fetched LANPage1 stuff. :-) */
1410 a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1411
1412 HWaddr[0] = a[5];
1413 HWaddr[1] = a[4];
1414 HWaddr[2] = a[3];
1415 HWaddr[3] = a[2];
1416 HWaddr[4] = a[1];
1417 HWaddr[5] = a[0];
1418
1419 dev->addr_len = FC_ALEN;
1420 memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1421 memset(dev->broadcast, 0xff, FC_ALEN);
1422
1423 /* The Tx queue is 127 deep on the 909.
1424 * Give ourselves some breathing room.
1425 */
1426 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1427 tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1428
1429 dev->open = mpt_lan_open;
1430 dev->stop = mpt_lan_close;
1431 dev->get_stats = mpt_lan_get_stats;
1432 dev->set_multicast_list = NULL;
1433 dev->change_mtu = mpt_lan_change_mtu;
1434 dev->hard_start_xmit = mpt_lan_sdu_send;
1435
1436/* Not in 2.3.42. Need 2.3.45+ */
1437 dev->tx_timeout = mpt_lan_tx_timeout;
1438 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1439
1440 dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1441 "and setting initial values\n"));
1442
1443 SET_MODULE_OWNER(dev);
1444
1445 if (register_netdev(dev) != 0) {
1446 free_netdev(dev);
1447 dev = NULL;
1448 }
1449 return dev;
1450}
1451
1452static int
1453mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1454{
1455 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1456 struct net_device *dev;
1457 int i;
1458
1459 for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1460 printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1461 "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1462 ioc->name, ioc->pfacts[i].PortNumber,
1463 ioc->pfacts[i].ProtocolFlags,
1464 MPT_PROTOCOL_FLAGS_c_c_c_c(
1465 ioc->pfacts[i].ProtocolFlags));
1466
1467 if (!(ioc->pfacts[i].ProtocolFlags &
1468 MPI_PORTFACTS_PROTOCOL_LAN)) {
1469 printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1470 "seems to be disabled on this adapter port!\n",
1471 ioc->name);
1472 continue;
1473 }
1474
1475 dev = mpt_register_lan_device(ioc, i);
1476 if (!dev) {
1477 printk(KERN_ERR MYNAM ": %s: Unable to register "
1478 "port%d as a LAN device\n", ioc->name,
1479 ioc->pfacts[i].PortNumber);
1480 continue;
1481 }
1482
1483 printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1484 "registered as '%s'\n", ioc->name, dev->name);
1485 printk(KERN_INFO MYNAM ": %s/%s: "
1486 "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1487 IOC_AND_NETDEV_NAMES_s_s(dev),
1488 dev->dev_addr[0], dev->dev_addr[1],
1489 dev->dev_addr[2], dev->dev_addr[3],
1490 dev->dev_addr[4], dev->dev_addr[5]);
1491
1492 ioc->netdev = dev;
1493
1494 return 0;
1495 }
1496
1497 return -ENODEV;
1498}
1499
1500static void
1501mptlan_remove(struct pci_dev *pdev)
1502{
1503 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1504 struct net_device *dev = ioc->netdev;
1505
1506 if(dev != NULL) {
1507 unregister_netdev(dev);
1508 free_netdev(dev);
1509 }
1510}
1511
1512static struct mpt_pci_driver mptlan_driver = {
1513 .probe = mptlan_probe,
1514 .remove = mptlan_remove,
1515};
1516
1517static int __init mpt_lan_init (void)
1518{
1519 show_mptmod_ver(LANAME, LANVER);
1520
1521 if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1522 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1523 return -EBUSY;
1524 }
1525
1526 /* Set the callback index to be used by driver core for turbo replies */
1527 mpt_lan_index = LanCtx;
1528
1529 dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1530
1531 if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1532 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1533 "handler with mptbase! The world is at an end! "
1534 "Everything is fading to black! Goodbye.\n");
1535 return -EBUSY;
1536 }
1537
1538 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1539
1540 if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
1541 dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));
1542 return 0;
1543}
1544
1545static void __exit mpt_lan_exit(void)
1546{
1547 mpt_device_driver_deregister(MPTLAN_DRIVER);
1548 mpt_reset_deregister(LanCtx);
1549
1550 if (LanCtx >= 0) {
1551 mpt_deregister(LanCtx);
1552 LanCtx = -1;
1553 mpt_lan_index = 0;
1554 }
1555}
1556
1557module_init(mpt_lan_init);
1558module_exit(mpt_lan_exit);
1559
1560/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1561static unsigned short
1562mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1563{
1564 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1565 struct fcllc *fcllc;
1566
1567 skb->mac.raw = skb->data;
1568 skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1569
1570 if (fch->dtype == htons(0xffff)) {
1571 u32 *p = (u32 *) fch;
1572
1573 swab32s(p + 0);
1574 swab32s(p + 1);
1575 swab32s(p + 2);
1576 swab32s(p + 3);
1577
1578 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1579 NETDEV_PTR_TO_IOC_NAME_s(dev));
1580 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1581 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1582 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1583 }
1584
1585 if (*fch->daddr & 1) {
1586 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1587 skb->pkt_type = PACKET_BROADCAST;
1588 } else {
1589 skb->pkt_type = PACKET_MULTICAST;
1590 }
1591 } else {
1592 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1593 skb->pkt_type = PACKET_OTHERHOST;
1594 } else {
1595 skb->pkt_type = PACKET_HOST;
1596 }
1597 }
1598
1599 fcllc = (struct fcllc *)skb->data;
1600
1601#ifdef QLOGIC_NAA_WORKAROUND
1602{
1603 u16 source_naa = fch->stype, found = 0;
1604
1605 /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1606 value. */
1607
1608 if ((source_naa & 0xF000) == 0)
1609 source_naa = swab16(source_naa);
1610
1611 if (fcllc->ethertype == htons(ETH_P_ARP))
1612 dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1613 "%04x.\n", source_naa));
1614
1615 if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1616 ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
1617 struct NAA_Hosed *nh, *prevnh;
1618 int i;
1619
1620 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1621 "system with non-RFC 2625 NAA value (%04x).\n",
1622 source_naa));
1623
1624 write_lock_irq(&bad_naa_lock);
1625 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1626 prevnh=nh, nh=nh->next) {
1627 if ((nh->ieee[0] == fch->saddr[0]) &&
1628 (nh->ieee[1] == fch->saddr[1]) &&
1629 (nh->ieee[2] == fch->saddr[2]) &&
1630 (nh->ieee[3] == fch->saddr[3]) &&
1631 (nh->ieee[4] == fch->saddr[4]) &&
1632 (nh->ieee[5] == fch->saddr[5])) {
1633 found = 1;
1634 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1635 "q/Rep w/ bad NAA from system already"
1636 " in DB.\n"));
1637 break;
1638 }
1639 }
1640
1641 if ((!found) && (nh == NULL)) {
1642
1643 nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1644 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1645 " bad NAA from system not yet in DB.\n"));
1646
1647 if (nh != NULL) {
1648 nh->next = NULL;
1649 if (!mpt_bad_naa)
1650 mpt_bad_naa = nh;
1651 if (prevnh)
1652 prevnh->next = nh;
1653
1654 nh->NAA = source_naa; /* Set the S_NAA value. */
1655 for (i = 0; i < FC_ALEN; i++)
1656 nh->ieee[i] = fch->saddr[i];
1657 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1658 "%02x:%02x with non-compliant S_NAA value.\n",
1659 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1660 fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1661 } else {
1662 printk (KERN_ERR "mptlan/type_trans: Unable to"
1663 " kmalloc a NAA_Hosed struct.\n");
1664 }
1665 } else if (!found) {
1666 printk (KERN_ERR "mptlan/type_trans: found not"
1667 " set, but nh isn't null. Evil "
1668 "funkiness abounds.\n");
1669 }
1670 write_unlock_irq(&bad_naa_lock);
1671 }
1672}
1673#endif
1674
1675 /* Strip the SNAP header from ARP packets since we don't
1676 * pass them through to the 802.2/SNAP layers.
1677 */
1678 if (fcllc->dsap == EXTENDED_SAP &&
1679 (fcllc->ethertype == htons(ETH_P_IP) ||
1680 fcllc->ethertype == htons(ETH_P_ARP))) {
1681 skb_pull(skb, sizeof(struct fcllc));
1682 return fcllc->ethertype;
1683 }
1684
1685 return htons(ETH_P_802_2);
1686}
1687
1688/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
new file mode 100644
index 000000000000..057904260ab1
--- /dev/null
+++ b/drivers/message/fusion/mptlan.h
@@ -0,0 +1,85 @@
1/* mptlan.h */
2
3#ifndef LINUX_MPTLAN_H_INCLUDED
4#define LINUX_MPTLAN_H_INCLUDED
5/*****************************************************************************/
6
7#if !defined(__GENKSYMS__)
8#include <linux/module.h>
9#endif
10
11#include <linux/netdevice.h>
12#include <linux/errno.h>
13// #include <linux/etherdevice.h>
14#include <linux/fcdevice.h>
15// #include <linux/fddidevice.h>
16#include <linux/skbuff.h>
17#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/miscdevice.h>
22#include <linux/spinlock.h>
23#include <linux/version.h>
24#include <linux/workqueue.h>
25#include <linux/delay.h>
26// #include <linux/trdevice.h>
27
28#include <asm/uaccess.h>
29#include <asm/io.h>
30
31 /* Override mptbase.h by pre-defining these! */
32 #define MODULEAUTHOR "Noah Romer, Eddie C. Dost"
33
34#include "mptbase.h"
35
36/*****************************************************************************/
37#define LANAME "Fusion MPT LAN driver"
38#define LANVER MPT_LINUX_VERSION_COMMON
39
40#ifdef MODULE
41MODULE_AUTHOR(MODULEAUTHOR);
42MODULE_DESCRIPTION(LANAME);
43#endif
44/*****************************************************************************/
45
46#define MPT_LAN_MAX_BUCKETS_OUT 256
47#define MPT_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */
48#define MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH 10
49#define MPT_LAN_RX_COPYBREAK 200
50#define MPT_LAN_TX_TIMEOUT (1*HZ)
51#define MPT_TX_MAX_OUT_LIM 127
52
53#define MPT_LAN_MIN_MTU 96 /* RFC2625 */
54#define MPT_LAN_MAX_MTU 65280 /* RFC2625 */
55#define MPT_LAN_MTU 13312 /* Max perf range + lower mem
56 usage than 16128 */
57
58#define MPT_LAN_NAA_RFC2625 0x1
59#define MPT_LAN_NAA_QLOGIC 0x2
60
61/* MPT LAN Reset and Suspend Resource Flags Defines */
62
63#define MPT_LAN_RESOURCE_FLAG_RETURN_POSTED_BUCKETS 0x01
64#define MPT_LAN_RESOURCE_FLAG_RETURN_PEND_TRANSMITS 0x02
65
66/*****************************************************************************/
67#ifdef MPT_LAN_IO_DEBUG
68#define dioprintk(x) printk x
69#else
70#define dioprintk(x)
71#endif
72
73#ifdef MPT_LAN_DEBUG
74#define dlprintk(x) printk x
75#else
76#define dlprintk(x)
77#endif
78
79#define NETDEV_TO_LANPRIV_PTR(d) ((struct mpt_lan_priv *)(d)->priv)
80#define NETDEV_PTR_TO_IOC_NAME_s(d) (NETDEV_TO_LANPRIV_PTR(d)->mpt_dev->name)
81#define IOC_AND_NETDEV_NAMES_s_s(d) NETDEV_PTR_TO_IOC_NAME_s(d), (d)->name
82
83/*****************************************************************************/
84#endif
85
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
new file mode 100644
index 000000000000..c98d6257ec02
--- /dev/null
+++ b/drivers/message/fusion/mptscsih.c
@@ -0,0 +1,6021 @@
1/*
2 * linux/drivers/message/fusion/mptscsih.c
3 * High performance SCSI / Fibre Channel SCSI Host device driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7 *
8 * Credits:
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
11 *
12 * A special thanks to Pamela Delaney (LSI Logic) for tons of work
13 * and countless enhancements while adding support for the 1030
14 * chip family. Pam has been instrumental in the development of
 15 * the 2.xx.xx series fusion drivers, and her contributions are
16 * far too numerous to hope to list in one place.
17 *
18 * A huge debt of gratitude is owed to David S. Miller (DaveM)
19 * for fixing much of the stupid and broken stuff in the early
20 * driver while porting to sparc64 platform. THANK YOU!
21 *
22 * (see mptbase.c)
23 *
24 * Copyright (c) 1999-2004 LSI Logic Corporation
25 * Original author: Steven J. Ralston
26 * (mailto:sjralston1@netscape.net)
27 * (mailto:mpt_linux_developer@lsil.com)
28 *
29 * $Id: mptscsih.c,v 1.104 2002/12/03 21:26:34 pdelaney Exp $
30 */
31/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
32/*
33 This program is free software; you can redistribute it and/or modify
34 it under the terms of the GNU General Public License as published by
35 the Free Software Foundation; version 2 of the License.
36
37 This program is distributed in the hope that it will be useful,
38 but WITHOUT ANY WARRANTY; without even the implied warranty of
39 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
40 GNU General Public License for more details.
41
42 NO WARRANTY
43 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
44 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
45 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
46 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
47 solely responsible for determining the appropriateness of using and
48 distributing the Program and assumes all risks associated with its
49 exercise of rights under this Agreement, including but not limited to
50 the risks and costs of program errors, damage to or loss of data,
51 programs or equipment, and unavailability or interruption of operations.
52
53 DISCLAIMER OF LIABILITY
54 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
55 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
57 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
58 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
59 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
60 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
61
62 You should have received a copy of the GNU General Public License
63 along with this program; if not, write to the Free Software
64 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
65*/
66/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
67
68#include "linux_compat.h" /* linux-2.6 tweaks */
69#include <linux/module.h>
70#include <linux/kernel.h>
71#include <linux/init.h>
72#include <linux/errno.h>
73#include <linux/kdev_t.h>
74#include <linux/blkdev.h>
75#include <linux/delay.h> /* for mdelay */
76#include <linux/interrupt.h> /* needed for in_interrupt() proto */
77#include <linux/reboot.h> /* notifier code */
78#include <linux/sched.h>
79#include <linux/workqueue.h>
80
81#include <scsi/scsi.h>
82#include <scsi/scsi_cmnd.h>
83#include <scsi/scsi_device.h>
84#include <scsi/scsi_host.h>
85#include <scsi/scsi_tcq.h>
86
87#include "mptbase.h"
88#include "mptscsih.h"
89
90/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
91#define my_NAME "Fusion MPT SCSI Host driver"
92#define my_VERSION MPT_LINUX_VERSION_COMMON
93#define MYNAM "mptscsih"
94
95MODULE_AUTHOR(MODULEAUTHOR);
96MODULE_DESCRIPTION(my_NAME);
97MODULE_LICENSE("GPL");
98
99/* Command line args */
100static int mpt_dv = MPTSCSIH_DOMAIN_VALIDATION;
101MODULE_PARM(mpt_dv, "i");
102MODULE_PARM_DESC(mpt_dv, " DV Algorithm: enhanced=1, basic=0 (default=MPTSCSIH_DOMAIN_VALIDATION=1)");
103
104static int mpt_width = MPTSCSIH_MAX_WIDTH;
105MODULE_PARM(mpt_width, "i");
106MODULE_PARM_DESC(mpt_width, " Max Bus Width: wide=1, narrow=0 (default=MPTSCSIH_MAX_WIDTH=1)");
107
108static int mpt_factor = MPTSCSIH_MIN_SYNC;
109MODULE_PARM(mpt_factor, "h");
110MODULE_PARM_DESC(mpt_factor, " Min Sync Factor (default=MPTSCSIH_MIN_SYNC=0x08)");
111
112static int mpt_saf_te = MPTSCSIH_SAF_TE;
113MODULE_PARM(mpt_saf_te, "i");
114MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)");
115
116static int mpt_pq_filter = 0;
117MODULE_PARM(mpt_pq_filter, "i");
118MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
119
120/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
121
122typedef struct _BIG_SENSE_BUF {
123 u8 data[MPT_SENSE_BUFFER_ALLOC];
124} BIG_SENSE_BUF;
125
126#define MPT_SCANDV_GOOD (0x00000000) /* must be 0 */
127#define MPT_SCANDV_DID_RESET (0x00000001)
128#define MPT_SCANDV_SENSE (0x00000002)
129#define MPT_SCANDV_SOME_ERROR (0x00000004)
130#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
131#define MPT_SCANDV_ISSUE_SENSE (0x00000010)
132#define MPT_SCANDV_FALLBACK (0x00000020)
133
134#define MPT_SCANDV_MAX_RETRIES (10)
135
136#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */
137#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */
138#define MPT_ICFLAG_PHYS_DISK 0x04 /* Any SCSI IO but do Phys Disk Format */
139#define MPT_ICFLAG_TAGGED_CMD 0x08 /* Do tagged IO */
140#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */
141#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */
142
143typedef struct _internal_cmd {
144 char *data; /* data pointer */
145 dma_addr_t data_dma; /* data dma address */
146 int size; /* transfer size */
147 u8 cmd; /* SCSI Op Code */
148 u8 bus; /* bus number */
149 u8 id; /* SCSI ID (virtual) */
150 u8 lun;
151 u8 flags; /* Bit Field - See above */
152 u8 physDiskNum; /* Phys disk number, -1 else */
153 u8 rsvd2;
154 u8 rsvd;
155} INTERNAL_CMD;
156
157typedef struct _negoparms {
158 u8 width;
159 u8 offset;
160 u8 factor;
161 u8 flags;
162} NEGOPARMS;
163
164typedef struct _dv_parameters {
165 NEGOPARMS max;
166 NEGOPARMS now;
167 u8 cmd;
168 u8 id;
169 u16 pad1;
170} DVPARAMETERS;
171
172
173/*
174 * Other private/forward protos...
175 */
176static int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
177static void mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq);
178static int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
179
180static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
181 SCSIIORequest_t *pReq, int req_idx);
182static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
183static void copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
184static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
185static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
186static u32 SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc);
187
188static int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
189static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
190
191static int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
192static int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
193
194static void mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen);
195static void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56);
196static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq);
197static void mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags);
198static void mptscsih_no_negotiate(MPT_SCSI_HOST *hd, int target_id);
199static int mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target, int flags);
200static int mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus);
201static int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
202static void mptscsih_timer_expired(unsigned long data);
203static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
204static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
205
206#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
207static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
208static void mptscsih_domainValidation(void *hd);
209static int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id);
210static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
211static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
212static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
213static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
214#endif
215/* module entry point */
216static int __init mptscsih_init (void);
217static void __exit mptscsih_exit (void);
218
219static int mptscsih_probe (struct pci_dev *, const struct pci_device_id *);
220static void mptscsih_remove(struct pci_dev *);
221static void mptscsih_shutdown(struct device *);
222#ifdef CONFIG_PM
223static int mptscsih_suspend(struct pci_dev *pdev, u32 state);
224static int mptscsih_resume(struct pci_dev *pdev);
225#endif
226
227
228/*
229 * Private data...
230 */
231
232static int mpt_scsi_hosts = 0;
233
234static int ScsiDoneCtx = -1;
235static int ScsiTaskCtx = -1;
236static int ScsiScanDvCtx = -1; /* Used only for bus scan and dv */
237
238#define SNS_LEN(scp) sizeof((scp)->sense_buffer)
239
240#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
241/*
242 * Domain Validation task structure
243 */
244static DEFINE_SPINLOCK(dvtaskQ_lock);
245static int dvtaskQ_active = 0;
246static int dvtaskQ_release = 0;
247static struct work_struct mptscsih_dvTask;
248#endif
249
250/*
251 * Wait Queue setup
252 */
253static DECLARE_WAIT_QUEUE_HEAD (scandv_waitq);
254static int scandv_wait_done = 1;
255
256
257/* Driver command line structure
258 */
259static struct scsi_host_template driver_template;
260
261/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
262/**
263 * mptscsih_add_sge - Place a simple SGE at address pAddr.
264 * @pAddr: virtual address for SGE
265 * @flagslength: SGE flags and data transfer length
266 * @dma_addr: Physical address
267 *
 268 * This routine places a single simple SGE, 32 or 64 bit as
 269 * appropriate for the platform, at the given address.
270 */
271static inline void
272mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
273{
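	/* Build a 64-bit simple SGE when dma_addr_t is 8 bytes wide (the DMA
	 * address is split into low/high dwords); otherwise use the 32-bit layout. */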
274 if (sizeof(dma_addr_t) == sizeof(u64)) {
275 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
276 u32 tmp = dma_addr & 0xFFFFFFFF;
277
278 pSge->FlagsLength = cpu_to_le32(flagslength);
279 pSge->Address.Low = cpu_to_le32(tmp);
280 tmp = (u32) ((u64)dma_addr >> 32);
281 pSge->Address.High = cpu_to_le32(tmp);
282
283 } else {
284 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
285 pSge->FlagsLength = cpu_to_le32(flagslength);
286 pSge->Address = cpu_to_le32(dma_addr);
287 }
288} /* mptscsih_add_sge() */
289
290/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
291/**
292 * mptscsih_add_chain - Place a chain SGE at address pAddr.
293 * @pAddr: virtual address for SGE
294 * @next: nextChainOffset value (u32's)
295 * @length: length of next SGL segment
296 * @dma_addr: Physical address
297 *
 298 * This routine places a chain SGE at the given address, linking to
 299 * the next SGL segment.
300 */
301static inline void
302mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
303{
304 if (sizeof(dma_addr_t) == sizeof(u64)) {
305 SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
306 u32 tmp = dma_addr & 0xFFFFFFFF;
307
308 pChain->Length = cpu_to_le16(length);
309 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
310
311 pChain->NextChainOffset = next;
312
313 pChain->Address.Low = cpu_to_le32(tmp);
314 tmp = (u32) ((u64)dma_addr >> 32);
315 pChain->Address.High = cpu_to_le32(tmp);
316 } else {
317 SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
318 pChain->Length = cpu_to_le16(length);
319 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
320 pChain->NextChainOffset = next;
321 pChain->Address = cpu_to_le32(dma_addr);
322 }
323} /* mptscsih_add_chain() */
324
325/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
326/*
327 * mptscsih_getFreeChainBuffer - Function to get a free chain
 328 *	buffer from the MPT adapter's FreeChainQ.
329 * @ioc: Pointer to MPT_ADAPTER structure
 330 *	@retIndex: Index of the allocated chain buffer. (output)
331 *
332 * return SUCCESS or FAILED
333 */
334static inline int
335mptscsih_getFreeChainBuffer(MPT_ADAPTER *ioc, int *retIndex)
336{
337 MPT_FRAME_HDR *chainBuf;
338 unsigned long flags;
339 int rc;
340 int chain_idx;
341
342 dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer called\n",
343 ioc->name));
344 spin_lock_irqsave(&ioc->FreeQlock, flags);
345 if (!list_empty(&ioc->FreeChainQ)) {
346 int offset;
347
348 chainBuf = list_entry(ioc->FreeChainQ.next, MPT_FRAME_HDR,
349 u.frame.linkage.list);
350 list_del(&chainBuf->u.frame.linkage.list);
351 offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer;
352 chain_idx = offset / ioc->req_sz;
353 rc = SUCCESS;
354 dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer (index %d), got buf=%p\n",
 355			ioc->name, chain_idx, chainBuf));
356 } else {
357 rc = FAILED;
358 chain_idx = MPT_HOST_NO_CHAIN;
359 dfailprintk((MYIOC_s_ERR_FMT "getFreeChainBuffer failed\n",
360 ioc->name));
361 }
362 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
363
364 *retIndex = chain_idx;
365 return rc;
366} /* mptscsih_getFreeChainBuffer() */
367
368/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
369/*
370 * mptscsih_AddSGE - Add a SGE (plus chain buffers) to the
371 * SCSIIORequest_t Message Frame.
372 * @ioc: Pointer to MPT_ADAPTER structure
373 * @SCpnt: Pointer to scsi_cmnd structure
374 * @pReq: Pointer to SCSIIORequest_t structure
375 *
 376 *	Returns SUCCESS or FAILED.
377 */
378static int
379mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
380 SCSIIORequest_t *pReq, int req_idx)
381{
382 char *psge;
383 char *chainSge;
384 struct scatterlist *sg;
385 int frm_sz;
386 int sges_left, sg_done;
387 int chain_idx = MPT_HOST_NO_CHAIN;
388 int sgeOffset;
389 int numSgeSlots, numSgeThisFrame;
390 u32 sgflags, sgdir, thisxfer = 0;
391 int chain_dma_off = 0;
392 int newIndex;
393 int ii;
394 dma_addr_t v2;
395 u32 RequestNB;
396
397 sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
398 if (sgdir == MPI_SCSIIO_CONTROL_WRITE) {
399 sgdir = MPT_TRANSFER_HOST_TO_IOC;
400 } else {
401 sgdir = MPT_TRANSFER_IOC_TO_HOST;
402 }
403
404 psge = (char *) &pReq->SGL;
405 frm_sz = ioc->req_sz;
406
407 /* Map the data portion, if any.
408 * sges_left = 0 if no data transfer.
409 */
410 if ( (sges_left = SCpnt->use_sg) ) {
411 sges_left = pci_map_sg(ioc->pcidev,
412 (struct scatterlist *) SCpnt->request_buffer,
413 SCpnt->use_sg,
414 SCpnt->sc_data_direction);
415 if (sges_left == 0)
416 return FAILED;
417 } else if (SCpnt->request_bufflen) {
418 SCpnt->SCp.dma_handle = pci_map_single(ioc->pcidev,
419 SCpnt->request_buffer,
420 SCpnt->request_bufflen,
421 SCpnt->sc_data_direction);
422 dsgprintk((MYIOC_s_INFO_FMT "SG: non-SG for %p, len=%d\n",
423 ioc->name, SCpnt, SCpnt->request_bufflen));
424 mptscsih_add_sge((char *) &pReq->SGL,
425 0xD1000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|SCpnt->request_bufflen,
426 SCpnt->SCp.dma_handle);
427
428 return SUCCESS;
429 }
430
431 /* Handle the SG case.
432 */
433 sg = (struct scatterlist *) SCpnt->request_buffer;
434 sg_done = 0;
435 sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION);
436 chainSge = NULL;
437
438 /* Prior to entering this loop - the following must be set
439 * current MF: sgeOffset (bytes)
440 * chainSge (Null if original MF is not a chain buffer)
441 * sg_done (num SGE done for this MF)
442 */
443
444nextSGEset:
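	/* Each simple SGE occupies a u32 FlagsLength plus a dma_addr_t sized
	 * address; compute how many fit in the remaining request frame space. */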
445 numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) );
446 numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
447
448 sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir;
449
450 /* Get first (num - 1) SG elements
451 * Skip any SG entries with a length of 0
452 * NOTE: at finish, sg and psge pointed to NEXT data/location positions
453 */
454 for (ii=0; ii < (numSgeThisFrame-1); ii++) {
455 thisxfer = sg_dma_len(sg);
456 if (thisxfer == 0) {
457 sg ++; /* Get next SG element from the OS */
458 sg_done++;
459 continue;
460 }
461
462 v2 = sg_dma_address(sg);
463 mptscsih_add_sge(psge, sgflags | thisxfer, v2);
464
465 sg++; /* Get next SG element from the OS */
466 psge += (sizeof(u32) + sizeof(dma_addr_t));
467 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
468 sg_done++;
469 }
470
471 if (numSgeThisFrame == sges_left) {
472 /* Add last element, end of buffer and end of list flags.
473 */
474 sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT |
475 MPT_SGE_FLAGS_END_OF_BUFFER |
476 MPT_SGE_FLAGS_END_OF_LIST;
477
478 /* Add last SGE and set termination flags.
479 * Note: Last SGE may have a length of 0 - which should be ok.
480 */
481 thisxfer = sg_dma_len(sg);
482
483 v2 = sg_dma_address(sg);
484 mptscsih_add_sge(psge, sgflags | thisxfer, v2);
485 /*
486 sg++;
487 psge += (sizeof(u32) + sizeof(dma_addr_t));
488 */
489 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
490 sg_done++;
491
492 if (chainSge) {
493 /* The current buffer is a chain buffer,
494 * but there is not another one.
495 * Update the chain element
496 * Offset and Length fields.
497 */
498 mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
499 } else {
500 /* The current buffer is the original MF
501 * and there is no Chain buffer.
502 */
503 pReq->ChainOffset = 0;
504 RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
505 dsgprintk((MYIOC_s_ERR_FMT
506 "Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
507 ioc->RequestNB[req_idx] = RequestNB;
508 }
509 } else {
510 /* At least one chain buffer is needed.
511 * Complete the first MF
512 * - last SGE element, set the LastElement bit
513 * - set ChainOffset (words) for orig MF
514 * (OR finish previous MF chain buffer)
515 * - update MFStructPtr ChainIndex
516 * - Populate chain element
517 * Also
518 * Loop until done.
519 */
520
521 dsgprintk((MYIOC_s_INFO_FMT "SG: Chain Required! sg done %d\n",
522 ioc->name, sg_done));
523
524 /* Set LAST_ELEMENT flag for last non-chain element
525 * in the buffer. Since psge points at the NEXT
526 * SGE element, go back one SGE element, update the flags
527 * and reset the pointer. (Note: sgflags & thisxfer are already
528 * set properly).
529 */
530 if (sg_done) {
531 u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t)));
532 sgflags = le32_to_cpu(*ptmp);
533 sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
534 *ptmp = cpu_to_le32(sgflags);
535 }
536
537 if (chainSge) {
538 /* The current buffer is a chain buffer.
539 * chainSge points to the previous Chain Element.
540 * Update its chain element Offset and Length (must
541 * include chain element size) fields.
542 * Old chain element is now complete.
543 */
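			/* Chain offsets are expressed in 32-bit words, hence the
			 * byte offset is shifted right by 2. */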
544 u8 nextChain = (u8) (sgeOffset >> 2);
545 sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
546 mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
547 } else {
548 /* The original MF buffer requires a chain buffer -
549 * set the offset.
550 * Last element in this MF is a chain element.
551 */
552 pReq->ChainOffset = (u8) (sgeOffset >> 2);
553 RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
554 dsgprintk((MYIOC_s_ERR_FMT "Chain Buffer Needed, RequestNB=%x sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
555 ioc->RequestNB[req_idx] = RequestNB;
556 }
557
558 sges_left -= sg_done;
559
560
561 /* NOTE: psge points to the beginning of the chain element
562 * in current buffer. Get a chain buffer.
563 */
564 dsgprintk((MYIOC_s_INFO_FMT
565 "calling getFreeChainBuffer SCSI cmd=%02x (%p)\n",
566 ioc->name, pReq->CDB[0], SCpnt));
567 if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED)
568 return FAILED;
569
570 /* Update the tracking arrays.
571 * If chainSge == NULL, update ReqToChain, else ChainToChain
572 */
573 if (chainSge) {
574 ioc->ChainToChain[chain_idx] = newIndex;
575 } else {
576 ioc->ReqToChain[req_idx] = newIndex;
577 }
578 chain_idx = newIndex;
579 chain_dma_off = ioc->req_sz * chain_idx;
580
581 /* Populate the chainSGE for the current buffer.
582 * - Set chain buffer pointer to psge and fill
583 * out the Address and Flags fields.
584 */
585 chainSge = (char *) psge;
586 dsgprintk((KERN_INFO " Current buff @ %p (index 0x%x)",
587 psge, req_idx));
588
589 /* Start the SGE for the next buffer
590 */
591 psge = (char *) (ioc->ChainBuffer + chain_dma_off);
592 sgeOffset = 0;
593 sg_done = 0;
594
595 dsgprintk((KERN_INFO " Chain buff @ %p (index 0x%x)\n",
596 psge, chain_idx));
597
598 /* Start the SGE for the next buffer
599 */
600
601 goto nextSGEset;
602 }
603
604 return SUCCESS;
605} /* mptscsih_AddSGE() */
606
607/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
608/*
609 * mptscsih_io_done - Main SCSI IO callback routine registered to
610 * Fusion MPT (base) driver
611 * @ioc: Pointer to MPT_ADAPTER structure
612 * @mf: Pointer to original MPT request frame
 613 *	@mr: Pointer to MPT reply frame (NULL if TurboReply)
614 *
615 * This routine is called from mpt.c::mpt_interrupt() at the completion
616 * of any SCSI IO request.
617 * This routine is registered with the Fusion MPT (base) driver at driver
618 * load/init time via the mpt_register() API call.
619 *
620 * Returns 1 indicating alloc'd request frame ptr should be freed.
621 */
622static int
623mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
624{
625 struct scsi_cmnd *sc;
626 MPT_SCSI_HOST *hd;
627 SCSIIORequest_t *pScsiReq;
628 SCSIIOReply_t *pScsiReply;
629 u16 req_idx;
630
631 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
632
633 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
634 sc = hd->ScsiLookup[req_idx];
635 if (sc == NULL) {
636 MPIHeader_t *hdr = (MPIHeader_t *)mf;
637
638 /* Remark: writeSDP1 will use the ScsiDoneCtx
639 * If a SCSI I/O cmd, device disabled by OS and
640 * completion done. Cannot touch sc struct. Just free mem.
641 */
642 if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST)
643 printk(MYIOC_s_ERR_FMT "NULL ScsiCmd ptr!\n",
644 ioc->name);
645
646 mptscsih_freeChainBuffers(ioc, req_idx);
647 return 1;
648 }
649
650 dmfprintk((MYIOC_s_INFO_FMT
651 "ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
652 ioc->name, mf, mr, sc, req_idx));
653
654 sc->result = DID_OK << 16; /* Set default reply as OK */
655 pScsiReq = (SCSIIORequest_t *) mf;
656 pScsiReply = (SCSIIOReply_t *) mr;
657
658 if (pScsiReply == NULL) {
659 /* special context reply handling */
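		/* A context (turbo) reply carries no reply frame: the IO completed
		 * successfully, so the default DID_OK result set above stands. */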
660 ;
661 } else {
662 u32 xfer_cnt;
663 u16 status;
664 u8 scsi_state, scsi_status;
665
666 status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
667 scsi_state = pScsiReply->SCSIState;
668 scsi_status = pScsiReply->SCSIStatus;
669 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
670 sc->resid = sc->request_bufflen - xfer_cnt;
671
672 dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
673 "IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
674 "resid=%d bufflen=%d xfer_cnt=%d\n",
675 ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
676 status, scsi_state, scsi_status, sc->resid,
677 sc->request_bufflen, xfer_cnt));
678
679 if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
680 copy_sense_data(sc, hd, mf, pScsiReply);
681
682 /*
683 * Look for + dump FCP ResponseInfo[]!
684 */
685 if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) {
686 printk(KERN_NOTICE " FCP_ResponseInfo=%08xh\n",
687 le32_to_cpu(pScsiReply->ResponseInfo));
688 }
689
690 switch(status) {
691 case MPI_IOCSTATUS_BUSY: /* 0x0002 */
692 /* CHECKME!
693 * Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry)
694 * But not: DID_BUS_BUSY lest one risk
695 * killing interrupt handler:-(
696 */
697 sc->result = SAM_STAT_BUSY;
698 break;
699
700 case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
701 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
702 sc->result = DID_BAD_TARGET << 16;
703 break;
704
705 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
706 /* Spoof to SCSI Selection Timeout! */
707 sc->result = DID_NO_CONNECT << 16;
708
709 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
710 hd->sel_timeout[pScsiReq->TargetID]++;
711 break;
712
713 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
714 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
715 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
716 /* Linux handles an unsolicited DID_RESET better
717 * than an unsolicited DID_ABORT.
718 */
719 sc->result = DID_RESET << 16;
720
721 /* GEM Workaround. */
722 if (ioc->bus_type == SCSI)
723 mptscsih_no_negotiate(hd, sc->device->id);
724 break;
725
726 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
727 if ( xfer_cnt >= sc->underflow ) {
728 /* Sufficient data transfer occurred */
729 sc->result = (DID_OK << 16) | scsi_status;
730 } else if ( xfer_cnt == 0 ) {
731 /* A CRC Error causes this condition; retry */
732 sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
733 (CHECK_CONDITION << 1);
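				/* Fabricate fixed-format sense data: 0x70 = current error,
				 * byte 2 = sense key (NO_SENSE), bytes 12/13 = ASC/ASCQ. */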
734 sc->sense_buffer[0] = 0x70;
735 sc->sense_buffer[2] = NO_SENSE;
736 sc->sense_buffer[12] = 0;
737 sc->sense_buffer[13] = 0;
738 } else {
739 sc->result = DID_SOFT_ERROR << 16;
740 }
741 dreplyprintk((KERN_NOTICE "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->target));
742 break;
743
744 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
745 /*
746 * Do upfront check for valid SenseData and give it
747 * precedence!
748 */
749 sc->result = (DID_OK << 16) | scsi_status;
750 if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
751 /* Have already saved the status and sense data
752 */
753 ;
754 } else {
755 if (xfer_cnt < sc->underflow) {
756 sc->result = DID_SOFT_ERROR << 16;
757 }
758 if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
759 /* What to do?
760 */
761 sc->result = DID_SOFT_ERROR << 16;
762 }
763 else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
764 /* Not real sure here either... */
765 sc->result = DID_RESET << 16;
766 }
767 }
768
769 dreplyprintk((KERN_NOTICE " sc->underflow={report ERR if < %02xh bytes xfer'd}\n",
770 sc->underflow));
771 dreplyprintk((KERN_NOTICE " ActBytesXferd=%02xh\n", xfer_cnt));
772 /* Report Queue Full
773 */
774 if (scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)
775 mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
776
777 break;
778
779 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
780 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
781 scsi_status = pScsiReply->SCSIStatus;
782 sc->result = (DID_OK << 16) | scsi_status;
783 if (scsi_state == 0) {
784 ;
785 } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
786 /*
787 * If running against circa 200003dd 909 MPT f/w,
788 * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
789 * (QUEUE_FULL) returned from device! --> get 0x0000?128
790 * and with SenseBytes set to 0.
791 */
792 if (pScsiReply->SCSIStatus == MPI_SCSI_STATUS_TASK_SET_FULL)
793 mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
794
795 }
796 else if (scsi_state &
797 (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)
798 ) {
799 /*
800 * What to do?
801 */
802 sc->result = DID_SOFT_ERROR << 16;
803 }
804 else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
805 /* Not real sure here either... */
806 sc->result = DID_RESET << 16;
807 }
808 else if (scsi_state & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) {
809 /* Device Inq. data indicates that it supports
810 * QTags, but rejects QTag messages.
811 * This command completed OK.
812 *
813 * Not real sure here either so do nothing... */
814 }
815
816 if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL)
817 mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
818
819 /* Add handling of:
820 * Reservation Conflict, Busy,
821 * Command Terminated, CHECK
822 */
823 break;
824
825 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
826 sc->result = DID_SOFT_ERROR << 16;
827 break;
828
829 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
830 case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
831 case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
832 case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
833 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
834 case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
835 case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
836 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
837 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
838 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
839 default:
840 /*
841 * What to do?
842 */
843 sc->result = DID_SOFT_ERROR << 16;
844 break;
845
846 } /* switch(status) */
847
848 dreplyprintk((KERN_NOTICE " sc->result is %08xh\n", sc->result));
849 } /* end of address reply case */
850
851 /* Unmap the DMA buffers, if any. */
852 if (sc->use_sg) {
853 pci_unmap_sg(ioc->pcidev, (struct scatterlist *) sc->request_buffer,
854 sc->use_sg, sc->sc_data_direction);
855 } else if (sc->request_bufflen) {
856 pci_unmap_single(ioc->pcidev, sc->SCp.dma_handle,
857 sc->request_bufflen, sc->sc_data_direction);
858 }
859
860 hd->ScsiLookup[req_idx] = NULL;
861
862 sc->scsi_done(sc); /* Issue the command callback */
863
864 /* Free Chain buffers */
865 mptscsih_freeChainBuffers(ioc, req_idx);
866 return 1;
867}
868
869
870/*
 871 *	mptscsih_flush_running_cmds - Walk the Scsi_Host instance's command
 872 *	lookup table and complete each outstanding command back to the OS.
873 * Called only if recovering from a FW reload.
874 * @hd: Pointer to a SCSI HOST structure
875 *
876 * Returns: None.
877 *
878 * Must be called while new I/Os are being queued.
879 */
880static void
881mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
882{
883 MPT_ADAPTER *ioc = hd->ioc;
884 struct scsi_cmnd *SCpnt;
885 MPT_FRAME_HDR *mf;
886 int ii;
887 int max = ioc->req_depth;
888
889 dprintk((KERN_INFO MYNAM ": flush_ScsiLookup called\n"));
890 for (ii= 0; ii < max; ii++) {
891 if ((SCpnt = hd->ScsiLookup[ii]) != NULL) {
892
893 /* Command found.
894 */
895
896 /* Null ScsiLookup index
897 */
898 hd->ScsiLookup[ii] = NULL;
899
900 mf = MPT_INDEX_2_MFPTR(ioc, ii);
901 dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n",
902 mf, SCpnt));
903
904 /* Set status, free OS resources (SG DMA buffers)
905 * Do OS callback
906 * Free driver resources (chain, msg buffers)
907 */
908 if (scsi_device_online(SCpnt->device)) {
909 if (SCpnt->use_sg) {
910 pci_unmap_sg(ioc->pcidev,
911 (struct scatterlist *) SCpnt->request_buffer,
912 SCpnt->use_sg,
913 SCpnt->sc_data_direction);
914 } else if (SCpnt->request_bufflen) {
915 pci_unmap_single(ioc->pcidev,
916 SCpnt->SCp.dma_handle,
917 SCpnt->request_bufflen,
918 SCpnt->sc_data_direction);
919 }
920 }
921 SCpnt->result = DID_RESET << 16;
922 SCpnt->host_scribble = NULL;
923
924 /* Free Chain buffers */
925 mptscsih_freeChainBuffers(ioc, ii);
926
927 /* Free Message frames */
928 mpt_free_msg_frame(ioc, mf);
929
930 SCpnt->scsi_done(SCpnt); /* Issue the command callback */
931 }
932 }
933
934 return;
935}
936
937/*
938 * mptscsih_search_running_cmds - Delete any commands associated
939 * with the specified target and lun. Function called only
 940 *	when a lun is disabled by the mid-layer.
 941 *	Do NOT access the referenced scsi_cmnd structure or
 942 *	members; doing so will cause either a paging or NULL ptr error.
943 * @hd: Pointer to a SCSI HOST structure
944 * @target: target id
945 * @lun: lun
946 *
947 * Returns: None.
948 *
949 * Called from slave_destroy.
950 */
951static void
952mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
953{
954 SCSIIORequest_t *mf = NULL;
955 int ii;
956 int max = hd->ioc->req_depth;
957
958 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
959 target, lun, max));
960
961 for (ii=0; ii < max; ii++) {
962 if (hd->ScsiLookup[ii] != NULL) {
963
964 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
965
966 dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n",
967 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1]));
968
969 if ((mf->TargetID != ((u8)target)) || (mf->LUN[1] != ((u8) lun)))
970 continue;
971
972 /* Cleanup
973 */
974 hd->ScsiLookup[ii] = NULL;
975 mptscsih_freeChainBuffers(hd->ioc, ii);
976 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
977 }
978 }
979
980 return;
981}
982
983/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
984/*
985 * Hack! It might be nice to report if a device is returning QUEUE_FULL
986 * but maybe not each and every time...
987 */
988static long last_queue_full = 0;
989
990/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
991/*
992 * mptscsih_report_queue_full - Report QUEUE_FULL status returned
993 * from a SCSI target device.
994 * @sc: Pointer to scsi_cmnd structure
995 * @pScsiReply: Pointer to SCSIIOReply_t
996 * @pScsiReq: Pointer to original SCSI request
997 *
998 * This routine periodically reports QUEUE_FULL status returned from a
999 * SCSI target device. It reports this to the console via kernel
1000 * printk() API call, not more than once every 10 seconds.
1001 */
1002static void
1003mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq)
1004{
1005 long time = jiffies;
1006
1007 if (time - last_queue_full > 10 * HZ) {
1008 char *ioc_str = "ioc?";
1009
1010 if (sc->device && sc->device->host != NULL && sc->device->host->hostdata != NULL)
1011 ioc_str = ((MPT_SCSI_HOST *)sc->device->host->hostdata)->ioc->name;
1012 dprintk((MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n",
1013 ioc_str, 0, sc->device->id, sc->device->lun));
1014 last_queue_full = time;
1015 }
1016}
1017
1018/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1019static char *info_kbuf = NULL;
1020
1021/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1022/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1023/*
1024 * mptscsih_probe - Installs scsi devices per bus.
1025 * @pdev: Pointer to pci_dev structure
1026 *
1027 * Returns 0 for success, non-zero for failure.
1028 *
1029 */
1030
1031static int
1032mptscsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1033{
1034 struct Scsi_Host *sh;
1035 MPT_SCSI_HOST *hd;
1036 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1037 unsigned long flags;
1038 int sz, ii;
1039 int numSGE = 0;
1040 int scale;
1041 int ioc_cap;
1042 u8 *mem;
1043 int error=0;
1044
1045
1046 /* 20010202 -sralston
1047 * Added sanity check on readiness of the MPT adapter.
1048 */
1049 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
1050 printk(MYIOC_s_WARN_FMT
1051 "Skipping because it's not operational!\n",
1052 ioc->name);
1053 return -ENODEV;
1054 }
1055
1056 if (!ioc->active) {
1057 printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
1058 ioc->name);
1059 return -ENODEV;
1060 }
1061
1062 /* Sanity check - ensure at least 1 port is INITIATOR capable
1063 */
1064 ioc_cap = 0;
1065 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
1066 if (ioc->pfacts[ii].ProtocolFlags &
1067 MPI_PORTFACTS_PROTOCOL_INITIATOR)
1068 ioc_cap ++;
1069 }
1070
1071 if (!ioc_cap) {
1072 printk(MYIOC_s_WARN_FMT
1073 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
1074 ioc->name, ioc);
1075 return -ENODEV;
1076 }
1077
1078 sh = scsi_host_alloc(&driver_template, sizeof(MPT_SCSI_HOST));
1079
1080 if (!sh) {
1081 printk(MYIOC_s_WARN_FMT
1082 "Unable to register controller with SCSI subsystem\n",
1083 ioc->name);
1084 return -1;
1085 }
1086
1087 spin_lock_irqsave(&ioc->FreeQlock, flags);
1088
1089 /* Attach the SCSI Host to the IOC structure
1090 */
1091 ioc->sh = sh;
1092
1093 sh->io_port = 0;
1094 sh->n_io_port = 0;
1095 sh->irq = 0;
1096
1097 /* set 16 byte cdb's */
1098 sh->max_cmd_len = 16;
1099
1100 /* Yikes! This is important!
1101 * Otherwise, by default, linux
1102 * only scans target IDs 0-7!
1103 * pfactsN->MaxDevices unreliable
1104 * (not supported in early
1105 * versions of the FW).
1106 * max_id = 1 + actual max id,
1107 * max_lun = 1 + actual last lun,
1108 * see hosts.h :o(
1109 */
1110 if (ioc->bus_type == SCSI) {
1111 sh->max_id = MPT_MAX_SCSI_DEVICES;
1112 } else {
1113 /* For FC, increase the queue depth
1114 * from MPT_SCSI_CAN_QUEUE (31)
1115 * to MPT_FC_CAN_QUEUE (63).
1116 */
1117 sh->can_queue = MPT_FC_CAN_QUEUE;
1118 sh->max_id =
1119 MPT_MAX_FC_DEVICES<256 ? MPT_MAX_FC_DEVICES : 255;
1120 }
1121
1122 sh->max_lun = MPT_LAST_LUN + 1;
1123 sh->max_channel = 0;
1124 sh->this_id = ioc->pfacts[0].PortSCSIID;
1125
1126 /* Required entry.
1127 */
1128 sh->unique_id = ioc->id;
1129
1130 /* Verify that we won't exceed the maximum
1131 * number of chain buffers
1132 * We can optimize: ZZ = req_sz/sizeof(SGE)
1133 * For 32bit SGE's:
1134 * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
1135 * + (req_sz - 64)/sizeof(SGE)
1136 * A slightly different algorithm is required for
1137 * 64bit SGEs.
1138 */
1139 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
1140 if (sizeof(dma_addr_t) == sizeof(u64)) {
1141 numSGE = (scale - 1) *
1142 (ioc->facts.MaxChainDepth-1) + scale +
1143 (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
1144 sizeof(u32));
1145 } else {
1146 numSGE = 1 + (scale - 1) *
1147 (ioc->facts.MaxChainDepth-1) + scale +
1148 (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
1149 sizeof(u32));
1150 }
1151
1152 if (numSGE < sh->sg_tablesize) {
1153 /* Reset this value */
1154 dprintk((MYIOC_s_INFO_FMT
1155 "Resetting sg_tablesize to %d from %d\n",
1156 ioc->name, numSGE, sh->sg_tablesize));
1157 sh->sg_tablesize = numSGE;
1158 }
1159
1160 /* Set the pci device pointer in Scsi_Host structure.
1161 */
1162 scsi_set_device(sh, &ioc->pcidev->dev);
1163
1164 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1165
1166 hd = (MPT_SCSI_HOST *) sh->hostdata;
1167 hd->ioc = ioc;
1168
1169 /* SCSI needs scsi_cmnd lookup table!
1170 * (with size equal to req_depth*PtrSz!)
1171 */
1172 sz = ioc->req_depth * sizeof(void *);
1173 mem = kmalloc(sz, GFP_ATOMIC);
1174 if (mem == NULL) {
1175 error = -ENOMEM;
1176 goto mptscsih_probe_failed;
1177 }
1178
1179 memset(mem, 0, sz);
1180 hd->ScsiLookup = (struct scsi_cmnd **) mem;
1181
1182 dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n",
1183 ioc->name, hd->ScsiLookup, sz));
1184
1185 /* Allocate memory for the device structures.
1186 * A non-Null pointer at an offset
1187 * indicates a device exists.
1188 * max_id = 1 + maximum id (hosts.h)
1189 */
1190 sz = sh->max_id * sizeof(void *);
1191 mem = kmalloc(sz, GFP_ATOMIC);
1192 if (mem == NULL) {
1193 error = -ENOMEM;
1194 goto mptscsih_probe_failed;
1195 }
1196
1197 memset(mem, 0, sz);
1198 hd->Targets = (VirtDevice **) mem;
1199
1200 dprintk((KERN_INFO
1201 " Targets @ %p, sz=%d\n", hd->Targets, sz));
1202
1203 /* Clear the TM flags
1204 */
1205 hd->tmPending = 0;
1206 hd->tmState = TM_STATE_NONE;
1207 hd->resetPending = 0;
1208 hd->abortSCpnt = NULL;
1209
1210 /* Clear the pointer used to store
1211 * single-threaded commands, i.e., those
1212 * issued during a bus scan, dv and
1213 * configuration pages.
1214 */
1215 hd->cmdPtr = NULL;
1216
 1217	/* Initialize this SCSI Host's timer.
1218 * To use, set the timer expires field
1219 * and add_timer
1220 */
1221 init_timer(&hd->timer);
1222 hd->timer.data = (unsigned long) hd;
1223 hd->timer.function = mptscsih_timer_expired;
1224
1225 if (ioc->bus_type == SCSI) {
1226 /* Update with the driver setup
1227 * values.
1228 */
1229 if (ioc->spi_data.maxBusWidth > mpt_width)
1230 ioc->spi_data.maxBusWidth = mpt_width;
1231 if (ioc->spi_data.minSyncFactor < mpt_factor)
1232 ioc->spi_data.minSyncFactor = mpt_factor;
1233
1234 if (ioc->spi_data.minSyncFactor == MPT_ASYNC) {
1235 ioc->spi_data.maxSyncOffset = 0;
1236 }
1237
1238 ioc->spi_data.Saf_Te = mpt_saf_te;
1239
1240 hd->negoNvram = 0;
1241#ifndef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
1242 hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
1243#endif
1244 ioc->spi_data.forceDv = 0;
1245 ioc->spi_data.noQas = 0;
1246 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
1247 ioc->spi_data.dvStatus[ii] =
1248 MPT_SCSICFG_NEGOTIATE;
1249 }
1250
1251 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++)
1252 ioc->spi_data.dvStatus[ii] |=
1253 MPT_SCSICFG_DV_NOT_DONE;
1254
1255 dinitprintk((MYIOC_s_INFO_FMT
1256 "dv %x width %x factor %x saf_te %x\n",
1257 ioc->name, mpt_dv,
1258 mpt_width,
1259 mpt_factor,
1260 mpt_saf_te));
1261 }
1262
1263 mpt_scsi_hosts++;
1264
1265 error = scsi_add_host (sh, &ioc->pcidev->dev);
1266 if(error) {
1267 dprintk((KERN_ERR MYNAM
1268 "scsi_add_host failed\n"));
1269 goto mptscsih_probe_failed;
1270 }
1271
1272 scsi_scan_host(sh);
1273 return 0;
1274
1275mptscsih_probe_failed:
1276
1277 mptscsih_remove(pdev);
1278 return error;
1279
1280}
1281
1282/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1283/*
 1284 *	mptscsih_remove - Remove SCSI host and devices
1285 * @pdev: Pointer to pci_dev structure
1286 *
1287 *
1288 */
1289static void
1290mptscsih_remove(struct pci_dev *pdev)
1291{
1292 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1293 struct Scsi_Host *host = ioc->sh;
1294 MPT_SCSI_HOST *hd;
1295 int count;
1296 unsigned long flags;
1297
1298 if(!host)
1299 return;
1300
1301 scsi_remove_host(host);
1302
1303#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
1304 /* Check DV thread active */
1305 count = 10 * HZ;
1306 spin_lock_irqsave(&dvtaskQ_lock, flags);
1307 if (dvtaskQ_active) {
1308 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
1309 while(dvtaskQ_active && --count) {
1310 set_current_state(TASK_INTERRUPTIBLE);
1311 schedule_timeout(1);
1312 }
1313 } else {
1314 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
1315 }
1316 if (!count)
1317 printk(KERN_ERR MYNAM ": ERROR - DV thread still active!\n");
1318#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
1319 else
1320 printk(KERN_ERR MYNAM ": DV thread orig %d, count %d\n", 10 * HZ, count);
1321#endif
1322#endif
1323
1324 hd = (MPT_SCSI_HOST *)host->hostdata;
1325 if (hd != NULL) {
1326 int sz1;
1327
1328 mptscsih_shutdown(&pdev->dev);
1329
1330 sz1=0;
1331
1332 if (hd->ScsiLookup != NULL) {
1333 sz1 = hd->ioc->req_depth * sizeof(void *);
1334 kfree(hd->ScsiLookup);
1335 hd->ScsiLookup = NULL;
1336 }
1337
1338 if (hd->Targets != NULL) {
1339 /*
1340 * Free pointer array.
1341 */
1342 kfree(hd->Targets);
1343 hd->Targets = NULL;
1344 }
1345
1346 dprintk((MYIOC_s_INFO_FMT
1347 "Free'd ScsiLookup (%d) memory\n",
1348 hd->ioc->name, sz1));
1349
1350 /* NULL the Scsi_Host pointer
1351 */
1352 hd->ioc->sh = NULL;
1353 }
1354
1355 scsi_host_put(host);
1356 mpt_scsi_hosts--;
1357
1358}
1359
1360/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1361/*
 1362 *	mptscsih_shutdown - Device shutdown handler; flushes the adapter cache
1363 *
1364 */
1365static void
1366mptscsih_shutdown(struct device * dev)
1367{
1368 MPT_ADAPTER *ioc = pci_get_drvdata(to_pci_dev(dev));
1369 struct Scsi_Host *host = ioc->sh;
1370 MPT_SCSI_HOST *hd;
1371
1372 if(!host)
1373 return;
1374
1375 hd = (MPT_SCSI_HOST *)host->hostdata;
1376
1377 /* Flush the cache of this adapter
1378 */
1379 if(hd != NULL)
1380 mptscsih_synchronize_cache(hd, 0);
1381
1382}
1383
1384#ifdef CONFIG_PM
1385/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1386/*
 1387 *	mptscsih_suspend - Fusion MPT scsi driver suspend routine.
1388 *
1389 *
1390 */
1391static int
1392mptscsih_suspend(struct pci_dev *pdev, u32 state)
1393{
1394 mptscsih_shutdown(&pdev->dev);
1395 return 0;
1396}
1397
1398/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1399/*
1400 * mptscsih_resume - Fusion MPT scsi driver resume routine.
1401 *
1402 *
1403 */
1404static int
1405mptscsih_resume(struct pci_dev *pdev)
1406{
1407 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1408 struct Scsi_Host *host = ioc->sh;
1409 MPT_SCSI_HOST *hd;
1410
1411 if(!host)
1412 return 0;
1413
1414 hd = (MPT_SCSI_HOST *)host->hostdata;
1415 if(!hd)
1416 return 0;
1417
1418#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
1419 {
1420 unsigned long lflags;
1421 spin_lock_irqsave(&dvtaskQ_lock, lflags);
1422 if (!dvtaskQ_active) {
1423 dvtaskQ_active = 1;
1424 spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
1425 INIT_WORK(&mptscsih_dvTask,
1426 mptscsih_domainValidation, (void *) hd);
1427 schedule_work(&mptscsih_dvTask);
1428 } else {
1429 spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
1430 }
1431 }
1432#endif
1433 return 0;
1434}
1435
1436#endif
1437
1438static struct mpt_pci_driver mptscsih_driver = {
1439 .probe = mptscsih_probe,
1440 .remove = mptscsih_remove,
1441 .shutdown = mptscsih_shutdown,
1442#ifdef CONFIG_PM
1443 .suspend = mptscsih_suspend,
1444 .resume = mptscsih_resume,
1445#endif
1446};
1447
1448/* SCSI host fops start here... */
1449/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1450/**
1451 * mptscsih_init - Register MPT adapter(s) as SCSI host(s) with
1452 * linux scsi mid-layer.
1453 *
1454 * Returns 0 for success, non-zero for failure.
1455 */
1456static int __init
1457mptscsih_init(void)
1458{
1459
1460 show_mptmod_ver(my_NAME, my_VERSION);
1461
1462 ScsiDoneCtx = mpt_register(mptscsih_io_done, MPTSCSIH_DRIVER);
1463 ScsiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSCSIH_DRIVER);
1464 ScsiScanDvCtx = mpt_register(mptscsih_scandv_complete, MPTSCSIH_DRIVER);
1465
1466 if (mpt_event_register(ScsiDoneCtx, mptscsih_event_process) == 0) {
1467 devtprintk((KERN_INFO MYNAM
1468 ": Registered for IOC event notifications\n"));
1469 }
1470
1471 if (mpt_reset_register(ScsiDoneCtx, mptscsih_ioc_reset) == 0) {
1472 dprintk((KERN_INFO MYNAM
1473 ": Registered for IOC reset notifications\n"));
1474 }
1475
1476 if(mpt_device_driver_register(&mptscsih_driver,
1477 MPTSCSIH_DRIVER) != 0 ) {
1478 dprintk((KERN_INFO MYNAM
1479 ": failed to register dd callbacks\n"));
1480 }
1481
1482 return 0;
1483
1484}
1485
1486/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1487/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1488/**
1489 * mptscsih_exit - Unregisters MPT adapter(s)
1490 *
1491 */
1492static void __exit
1493mptscsih_exit(void)
1494{
1495 mpt_device_driver_deregister(MPTSCSIH_DRIVER);
1496
1497 mpt_reset_deregister(ScsiDoneCtx);
1498 dprintk((KERN_INFO MYNAM
1499 ": Deregistered for IOC reset notifications\n"));
1500
1501 mpt_event_deregister(ScsiDoneCtx);
1502 dprintk((KERN_INFO MYNAM
1503 ": Deregistered for IOC event notifications\n"));
1504
1505 mpt_deregister(ScsiScanDvCtx);
1506 mpt_deregister(ScsiTaskCtx);
1507 mpt_deregister(ScsiDoneCtx);
1508
1509 if (info_kbuf != NULL)
1510 kfree(info_kbuf);
1511
1512}
1513
1514/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1515/**
1516 * mptscsih_info - Return information about MPT adapter
1517 * @SChost: Pointer to Scsi_Host structure
1518 *
1519 * (linux scsi_host_template.info routine)
1520 *
1521 * Returns pointer to buffer where information was written.
1522 */
1523static const char *
1524mptscsih_info(struct Scsi_Host *SChost)
1525{
1526 MPT_SCSI_HOST *h;
1527 int size = 0;
1528
1529 if (info_kbuf == NULL)
1530 if ((info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
1531 return info_kbuf;
1532
1533 h = (MPT_SCSI_HOST *)SChost->hostdata;
1534 info_kbuf[0] = '\0';
1535 if (h) {
1536 mpt_print_ioc_summary(h->ioc, info_kbuf, &size, 0, 0);
1537 info_kbuf[size-1] = '\0';
1538 }
1539
1540 return info_kbuf;
1541}
1542
1543struct info_str {
1544 char *buffer;
1545 int length;
1546 int offset;
1547 int pos;
1548};
1549
1550static void copy_mem_info(struct info_str *info, char *data, int len)
1551{
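	/* /proc read windowing: skip bytes before info->offset, clamp the total
	 * at info->length, and track overall progress in info->pos. */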
1552 if (info->pos + len > info->length)
1553 len = info->length - info->pos;
1554
1555 if (info->pos + len < info->offset) {
1556 info->pos += len;
1557 return;
1558 }
1559
1560 if (info->pos < info->offset) {
1561 data += (info->offset - info->pos);
1562 len -= (info->offset - info->pos);
1563 }
1564
1565 if (len > 0) {
1566 memcpy(info->buffer + info->pos, data, len);
1567 info->pos += len;
1568 }
1569}
1570
1571static int copy_info(struct info_str *info, char *fmt, ...)
1572{
1573 va_list args;
1574 char buf[81];
1575 int len;
1576
1577 va_start(args, fmt);
1578 len = vsprintf(buf, fmt, args);
1579 va_end(args);
1580
1581 copy_mem_info(info, buf, len);
1582 return len;
1583}
1584
1585static int mptscsih_host_info(MPT_ADAPTER *ioc, char *pbuf, off_t offset, int len)
1586{
1587 struct info_str info;
1588
1589 info.buffer = pbuf;
1590 info.length = len;
1591 info.offset = offset;
1592 info.pos = 0;
1593
1594 copy_info(&info, "%s: %s, ", ioc->name, ioc->prod_name);
1595 copy_info(&info, "%s%08xh, ", MPT_FW_REV_MAGIC_ID_STRING, ioc->facts.FWVersion.Word);
1596 copy_info(&info, "Ports=%d, ", ioc->facts.NumberOfPorts);
1597 copy_info(&info, "MaxQ=%d\n", ioc->req_depth);
1598
1599 return ((info.pos > info.offset) ? info.pos - info.offset : 0);
1600}
1601
1602/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1603/**
1604 * mptscsih_proc_info - Return information about MPT adapter
1605 *
 1606 *	(linux scsi_host_template.proc_info routine)
1607 *
1608 * buffer: if write, user data; if read, buffer for user
1609 * length: if write, return length;
1610 * offset: if write, 0; if read, the current offset into the buffer from
1611 * the previous read.
1612 * hostno: scsi host number
1613 * func: if write = 1; if read = 0
1614 */
1615static int
1616mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
1617 int length, int func)
1618{
1619 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
1620 MPT_ADAPTER *ioc = hd->ioc;
1621 int size = 0;
1622
1623 if (func) {
1624 /*
1625 * write is not supported
1626 */
1627 } else {
1628 if (start)
1629 *start = buffer;
1630
1631 size = mptscsih_host_info(ioc, buffer, offset, length);
1632 }
1633
1634 return size;
1635}
1636
1637/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1638#define ADD_INDEX_LOG(req_ent) do { } while(0)
1639
1640/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1641/**
1642 * mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine.
1643 * @SCpnt: Pointer to scsi_cmnd structure
1644 * @done: Pointer SCSI mid-layer IO completion function
1645 *
1646 * (linux scsi_host_template.queuecommand routine)
1647 * This is the primary SCSI IO start routine. Create a MPI SCSIIORequest
1648 * from a linux scsi_cmnd request and send it to the IOC.
1649 *
1650 * Returns 0. (rtn value discarded by linux scsi mid-layer)
1651 */
1652static int
1653mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1654{
1655 MPT_SCSI_HOST *hd;
1656 MPT_FRAME_HDR *mf;
1657 SCSIIORequest_t *pScsiReq;
1658 VirtDevice *pTarget;
1659 int target;
1660 int lun;
1661 u32 datalen;
1662 u32 scsictl;
1663 u32 scsidir;
1664 u32 cmd_len;
1665 int my_idx;
1666 int ii;
1667
1668 hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata;
1669 target = SCpnt->device->id;
1670 lun = SCpnt->device->lun;
1671 SCpnt->scsi_done = done;
1672
1673 pTarget = hd->Targets[target];
1674
1675 dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n",
1676 (hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done));
1677
1678 if (hd->resetPending) {
1679 dtmprintk((MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
1680 (hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt));
1681 return SCSI_MLQUEUE_HOST_BUSY;
1682 }
1683
1684 /*
1685 * Put together a MPT SCSI request...
1686 */
1687 if ((mf = mpt_get_msg_frame(ScsiDoneCtx, hd->ioc)) == NULL) {
1688 dprintk((MYIOC_s_WARN_FMT "QueueCmd, no msg frames!!\n",
1689 hd->ioc->name));
1690 return SCSI_MLQUEUE_HOST_BUSY;
1691 }
1692
1693 pScsiReq = (SCSIIORequest_t *) mf;
1694
1695 my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
1696
1697 ADD_INDEX_LOG(my_idx);
1698
1699 /* BUG FIX! 19991030 -sralston
1700 * TURs being issued with scsictl=0x02000000 (DATA_IN)!
1701 * Seems we may receive a buffer (datalen>0) even when there
1702 * will be no data transfer! GRRRRR...
1703 */
1704 if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
1705 datalen = SCpnt->request_bufflen;
1706 scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */
1707 } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
1708 datalen = SCpnt->request_bufflen;
1709 scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */
1710 } else {
1711 datalen = 0;
1712 scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1713 }
1714
1715 /* Default to untagged. Once a target structure has been allocated,
1716 * use the Inquiry data to determine if the device supports tagged queuing.
1717 */
1718 if ( pTarget
1719 && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
1720 && (SCpnt->device->tagged_supported)) {
1721 scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
1722 } else {
1723 scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
1724 }
1725
1726 /* Use the above information to set up the message frame
1727 */
1728 pScsiReq->TargetID = (u8) target;
1729 pScsiReq->Bus = (u8) SCpnt->device->channel;
1730 pScsiReq->ChainOffset = 0;
1731 pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1732 pScsiReq->CDBLength = SCpnt->cmd_len;
1733 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
1734 pScsiReq->Reserved = 0;
1735 pScsiReq->MsgFlags = mpt_msg_flags();
1736 pScsiReq->LUN[0] = 0;
1737 pScsiReq->LUN[1] = lun;
1738 pScsiReq->LUN[2] = 0;
1739 pScsiReq->LUN[3] = 0;
1740 pScsiReq->LUN[4] = 0;
1741 pScsiReq->LUN[5] = 0;
1742 pScsiReq->LUN[6] = 0;
1743 pScsiReq->LUN[7] = 0;
1744 pScsiReq->Control = cpu_to_le32(scsictl);
1745
1746 /*
1747 * Write SCSI CDB into the message
1748 */
1749 cmd_len = SCpnt->cmd_len;
1750 for (ii=0; ii < cmd_len; ii++)
1751 pScsiReq->CDB[ii] = SCpnt->cmnd[ii];
1752
1753 for (ii=cmd_len; ii < 16; ii++)
1754 pScsiReq->CDB[ii] = 0;
1755
1756 /* DataLength */
1757 pScsiReq->DataLength = cpu_to_le32(datalen);
1758
1759 /* SenseBuffer low address */
1760 pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma
1761 + (my_idx * MPT_SENSE_BUFFER_ALLOC));
1762
1763 /* Now add the SG list
1764 * Always have a SGE even if null length.
1765 */
1766 if (datalen == 0) {
1767 /* Add a NULL SGE */
1768 mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0,
1769 (dma_addr_t) -1);
1770 } else {
1771 /* Add a 32 or 64 bit SGE */
1772 if (mptscsih_AddSGE(hd->ioc, SCpnt, pScsiReq, my_idx) != SUCCESS)
1773 goto fail;
1774 }
1775
1776 hd->ScsiLookup[my_idx] = SCpnt;
1777 SCpnt->host_scribble = NULL;
1778
1779#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
1780 if (hd->ioc->bus_type == SCSI) {
1781 int dvStatus = hd->ioc->spi_data.dvStatus[target];
1782 int issueCmd = 1;
1783
1784 if (dvStatus || hd->ioc->spi_data.forceDv) {
1785
1786 if ((dvStatus & MPT_SCSICFG_NEED_DV) ||
1787 (hd->ioc->spi_data.forceDv & MPT_SCSICFG_NEED_DV)) {
1788 unsigned long lflags;
1789 /* Schedule DV if necessary */
1790 spin_lock_irqsave(&dvtaskQ_lock, lflags);
1791 if (!dvtaskQ_active) {
1792 dvtaskQ_active = 1;
1793 spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
1794 INIT_WORK(&mptscsih_dvTask, mptscsih_domainValidation, (void *) hd);
1795
1796 schedule_work(&mptscsih_dvTask);
1797 } else {
1798 spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
1799 }
1800 hd->ioc->spi_data.forceDv &= ~MPT_SCSICFG_NEED_DV;
1801 }
1802
1803 /* DV is being attempted on this target; extend the timeout.
1804 * Wait to issue the command until the flag is clear.
1805 */
1806 if (dvStatus & MPT_SCSICFG_DV_PENDING) {
1807 mod_timer(&SCpnt->eh_timeout, jiffies + 40 * HZ);
1808 issueCmd = 0;
1809 }
1810
1811 /* Set the DV flags.
1812 */
1813 if (dvStatus & MPT_SCSICFG_DV_NOT_DONE)
1814 mptscsih_set_dvflags(hd, pScsiReq);
1815
1816 if (!issueCmd)
1817 goto fail;
1818 }
1819 }
1820#endif
1821
1822 mpt_put_msg_frame(ScsiDoneCtx, hd->ioc, mf);
1823 dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
1824 hd->ioc->name, SCpnt, mf, my_idx));
1825 DBG_DUMP_REQUEST_FRAME(mf)
1826 return 0;
1827
1828 fail:
1829 mptscsih_freeChainBuffers(hd->ioc, my_idx);
1830 mpt_free_msg_frame(hd->ioc, mf);
1831 return SCSI_MLQUEUE_HOST_BUSY;
1832}
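/* Worked example of the request assembled above (values are illustrative):
 * a 4 KB DATA_IN transfer to a tagged-capable target ends up with
 *
 *   scsictl            = MPI_SCSIIO_CONTROL_READ | MPI_SCSIIO_CONTROL_SIMPLEQ;
 *   DataLength         = cpu_to_le32(4096);
 *   SenseBufferLowAddr = sense_buf_low_dma + my_idx * MPT_SENSE_BUFFER_ALLOC;
 *
 * plus the CDB copied in (zero-padded to 16 bytes), LUN[1] = lun, and either
 * a NULL SGE (datalen == 0) or a 32/64-bit SG list built by mptscsih_AddSGE().
 */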
1833
1834/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1835/*
1836 * mptscsih_freeChainBuffers - Function to free chain buffers associated
1837 * with a SCSI IO request
1838 * @ioc: Pointer to the MPT_ADAPTER structure
1839 * @req_idx: Index of the SCSI IO request frame.
1840 *
1841 * Called when SG chain buffer allocation fails and from the mptscsih callbacks.
1842 * No return.
1843 */
1844static void
1845mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
1846{
1847 MPT_FRAME_HDR *chain;
1848 unsigned long flags;
1849 int chain_idx;
1850 int next;
1851
1852 /* Get the first chain index and reset
1853 * tracker state.
1854 */
1855 chain_idx = ioc->ReqToChain[req_idx];
1856 ioc->ReqToChain[req_idx] = MPT_HOST_NO_CHAIN;
1857
1858 while (chain_idx != MPT_HOST_NO_CHAIN) {
1859
1860 /* Save the next chain buffer index */
1861 next = ioc->ChainToChain[chain_idx];
1862
1863 /* Free this chain buffer and reset
1864 * tracker
1865 */
1866 ioc->ChainToChain[chain_idx] = MPT_HOST_NO_CHAIN;
1867
1868 chain = (MPT_FRAME_HDR *) (ioc->ChainBuffer
1869 + (chain_idx * ioc->req_sz));
1870
1871 spin_lock_irqsave(&ioc->FreeQlock, flags);
1872 list_add_tail(&chain->u.frame.linkage.list, &ioc->FreeChainQ);
1873 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1874
1875 dmfprintk((MYIOC_s_INFO_FMT "FreeChainBuffers (index %d)\n",
1876 ioc->name, chain_idx));
1877
1878 /* handle next */
1879 chain_idx = next;
1880 }
1881 return;
1882}
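/* Illustrative walk of the chain tracker above, with made-up indices:
 * if ReqToChain[req_idx] == 5, ChainToChain[5] == 9 and
 * ChainToChain[9] == MPT_HOST_NO_CHAIN, the loop frees chain frames 5 and 9
 * (each located at ChainBuffer + idx * req_sz) back onto FreeChainQ and
 * leaves both trackers reset to MPT_HOST_NO_CHAIN.
 */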
1883
1884/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1885/*
1886 * Reset Handling
1887 */
1888
1889/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1890/*
1891 * mptscsih_TMHandler - Generic handler for SCSI Task Management.
1892 * Fall through to mpt_HardResetHandler if: not operational, too many
1893 * failed TM requests or handshake failure.
1894 *
1895 * @hd: Pointer to MPT_SCSI_HOST structure
1896 * @type: Task Management type
1897 * @target: Logical Target ID for reset (if appropriate)
1898 * @lun: Logical Unit for reset (if appropriate)
1899 * @ctx2abort: Context for the task to be aborted (if appropriate)
1900 *
1901 * Remark: Currently invoked from a non-interrupt thread (_bh).
1902 *
1903 * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC
1904 * will be active.
1905 *
1906 * Returns 0 for SUCCESS, or a non-zero value (FAILED or -1) otherwise.
1907 */
1908static int
1909mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout)
1910{
1911 MPT_ADAPTER *ioc;
1912 int rc = -1;
1913 int doTask = 1;
1914 u32 ioc_raw_state;
1915 unsigned long flags;
1916
1917 /* If FW is being reloaded currently, return success to
1918 * the calling function.
1919 */
1920 if (hd == NULL)
1921 return 0;
1922
1923 ioc = hd->ioc;
1924 if (ioc == NULL) {
1925 printk(KERN_ERR MYNAM " TMHandler" " NULL ioc!\n");
1926 return FAILED;
1927 }
1928 dtmprintk((MYIOC_s_INFO_FMT "TMHandler Entered!\n", ioc->name));
1929
1930 // SJR - CHECKME - Can we avoid this here?
1931 // (mpt_HardResetHandler has this check...)
1932 spin_lock_irqsave(&ioc->diagLock, flags);
1933 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) {
1934 spin_unlock_irqrestore(&ioc->diagLock, flags);
1935 return FAILED;
1936 }
1937 spin_unlock_irqrestore(&ioc->diagLock, flags);
1938
1939 /* Wait a fixed amount of time for the TM pending flag to be cleared.
1940 * If we time out and it is not a bus reset, then we return a FAILED status to the caller.
1941 * The call to mptscsih_tm_pending_wait() will set the pending flag if we are
1942 * successful. Otherwise, reload the FW.
1943 */
1944 if (mptscsih_tm_pending_wait(hd) == FAILED) {
1945 if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
1946 dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler abort: "
1947 "Timed out waiting for last TM (%d) to complete! \n",
1948 hd->ioc->name, hd->tmPending));
1949 return FAILED;
1950 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1951 dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler target reset: "
1952 "Timed out waiting for last TM (%d) to complete! \n",
1953 hd->ioc->name, hd->tmPending));
1954 return FAILED;
1955 } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
1956 dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler bus reset: "
1957 "Timed out waiting for last TM (%d) to complete! \n",
1958 hd->ioc->name, hd->tmPending));
1959 if (hd->tmPending & (1 << MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS))
1960 return FAILED;
1961
1962 doTask = 0;
1963 }
1964 } else {
1965 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
1966 hd->tmPending |= (1 << type);
1967 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
1968 }
1969
1970 /* Is operational?
1971 */
1972 ioc_raw_state = mpt_GetIocState(hd->ioc, 0);
1973
1974#ifdef MPT_DEBUG_RESET
1975 if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
1976 printk(MYIOC_s_WARN_FMT
1977 "TM Handler: IOC Not operational(0x%x)!\n",
1978 hd->ioc->name, ioc_raw_state);
1979 }
1980#endif
1981
1982 if (doTask && ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL)
1983 && !(ioc_raw_state & MPI_DOORBELL_ACTIVE)) {
1984
1985 /* Issue the Task Mgmt request.
1986 */
1987 if (hd->hard_resets < -1)
1988 hd->hard_resets++;
1989 rc = mptscsih_IssueTaskMgmt(hd, type, channel, target, lun, ctx2abort, timeout);
1990 if (rc) {
1991 printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n", hd->ioc->name);
1992 } else {
1993 dtmprintk((MYIOC_s_INFO_FMT "Issue of TaskMgmt Successful!\n", hd->ioc->name));
1994 }
1995 }
1996
1997 /* Only fall through to the HRH if this is a bus reset
1998 */
1999 if ((type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) && (rc ||
2000 ioc->reload_fw || (ioc->alt_ioc && ioc->alt_ioc->reload_fw))) {
2001 dtmprintk((MYIOC_s_INFO_FMT "Calling HardReset! \n",
2002 hd->ioc->name));
2003 rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP);
2004 }
2005
2006 dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc));
2007
2008 return rc;
2009}
2010
2011
2012/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2013/*
2014 * mptscsih_IssueTaskMgmt - Generic send Task Management function.
2015 * @hd: Pointer to MPT_SCSI_HOST structure
2016 * @type: Task Management type
2017 * @target: Logical Target ID for reset (if appropriate)
2018 * @lun: Logical Unit for reset (if appropriate)
2019 * @ctx2abort: Context for the task to be aborted (if appropriate)
2020 *
2021 * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
2022 * or a non-interrupt thread. In the former, must not call schedule().
2023 *
2024 * Not all fields are meaningful for all task types.
2025 *
2026 * Returns 0 for SUCCESS, -999 for "no msg frames",
2027 * else other non-zero value returned.
2028 */
2029static int
2030mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout)
2031{
2032 MPT_FRAME_HDR *mf;
2033 SCSITaskMgmt_t *pScsiTm;
2034 int ii;
2035 int retval;
2036
2037 /* Return Fail to calling function if no message frames available.
2038 */
2039 if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc)) == NULL) {
2040 dfailprintk((MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n",
2041 hd->ioc->name));
2042 //return FAILED;
2043 return -999;
2044 }
2045 dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n",
2046 hd->ioc->name, mf));
2047
2048 /* Format the Request
2049 */
2050 pScsiTm = (SCSITaskMgmt_t *) mf;
2051 pScsiTm->TargetID = target;
2052 pScsiTm->Bus = channel;
2053 pScsiTm->ChainOffset = 0;
2054 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
2055
2056 pScsiTm->Reserved = 0;
2057 pScsiTm->TaskType = type;
2058 pScsiTm->Reserved1 = 0;
2059 pScsiTm->MsgFlags = (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS)
2060 ? MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION : 0;
2061
2062 for (ii= 0; ii < 8; ii++) {
2063 pScsiTm->LUN[ii] = 0;
2064 }
2065 pScsiTm->LUN[1] = lun;
2066
2067 for (ii=0; ii < 7; ii++)
2068 pScsiTm->Reserved2[ii] = 0;
2069
2070 pScsiTm->TaskMsgContext = ctx2abort;
2071
2072 dtmprintk((MYIOC_s_INFO_FMT
2073 "IssueTaskMgmt: ctx2abort (0x%08x) type=%d\n",
2074 hd->ioc->name, ctx2abort, type));
2075
2076 DBG_DUMP_TM_REQUEST_FRAME((u32 *)pScsiTm);
2077
2078 if ((retval = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc,
2079 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm,
2080 CAN_SLEEP)) != 0) {
2081 dfailprintk((MYIOC_s_ERR_FMT "_send_handshake FAILED!"
2082 " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd,
2083 hd->ioc, mf));
2084 mpt_free_msg_frame(hd->ioc, mf);
2085 return retval;
2086 }
2087
2088 if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) {
2089 dfailprintk((MYIOC_s_ERR_FMT "_wait_for_completion FAILED!"
2090 " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd,
2091 hd->ioc, mf));
2092 mpt_free_msg_frame(hd->ioc, mf);
2093 dtmprintk((MYIOC_s_INFO_FMT "Calling HardReset! \n",
2094 hd->ioc->name));
2095 retval = mpt_HardResetHandler(hd->ioc, CAN_SLEEP);
2096 }
2097
2098 return retval;
2099}
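/* Sketch of a typical frame built above for an abort (field values follow
 * the code; the scenario itself is illustrative): TaskType is
 * MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, TargetID/Bus come from the failing
 * command's device, LUN[1] holds the LUN, TaskMsgContext carries the
 * aborted command's MsgContext, and MsgFlags stays 0 (the LIP-reset option
 * is set only for a bus reset). The frame is delivered via the doorbell
 * handshake rather than the normal request FIFO.
 */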
2100
2101/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2102/**
2103 * mptscsih_abort - Abort linux scsi_cmnd routine, new_eh variant
2104 * @SCpnt: Pointer to scsi_cmnd structure, IO to be aborted
2105 *
2106 * (linux scsi_host_template.eh_abort_handler routine)
2107 *
2108 * Returns SUCCESS or FAILED.
2109 */
2110static int
2111mptscsih_abort(struct scsi_cmnd * SCpnt)
2112{
2113 MPT_SCSI_HOST *hd;
2114 MPT_ADAPTER *ioc;
2115 MPT_FRAME_HDR *mf;
2116 u32 ctx2abort;
2117 int scpnt_idx;
2118 spinlock_t *host_lock = SCpnt->device->host->host_lock;
2119
2120 /* If we can't locate our host adapter structure, return FAILED status.
2121 */
2122 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
2123 SCpnt->result = DID_RESET << 16;
2124 SCpnt->scsi_done(SCpnt);
2125 dfailprintk((KERN_WARNING MYNAM ": mptscsih_abort: "
2126 "Can't locate host! (sc=%p)\n",
2127 SCpnt));
2128 return FAILED;
2129 }
2130
2131 ioc = hd->ioc;
2132 if (hd->resetPending)
2133 return FAILED;
2134
2135 printk(KERN_WARNING MYNAM ": %s: >> Attempting task abort! (sc=%p)\n",
2136 hd->ioc->name, SCpnt);
2137
2138 if (hd->timeouts < -1)
2139 hd->timeouts++;
2140
2141 /* Find this command
2142 */
2143 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
2144 /* Cmd not found in ScsiLookup.
2145 * Do OS callback.
2146 */
2147 SCpnt->result = DID_RESET << 16;
2148 dtmprintk((KERN_WARNING MYNAM ": %s: mptscsih_abort: "
2149 "Command not in the active list! (sc=%p)\n",
2150 hd->ioc->name, SCpnt));
2151 return SUCCESS;
2152 }
2153
2154 /* Most important! Set TaskMsgContext to SCpnt's MsgContext!
2155 * (the IO to be ABORT'd)
2156 *
2157 * NOTE: Since we do not byteswap MsgContext, we do not
2158 * swap it here either. It is an opaque cookie to
2159 * the controller, so it does not matter. -DaveM
2160 */
2161 mf = MPT_INDEX_2_MFPTR(hd->ioc, scpnt_idx);
2162 ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
2163
2164 hd->abortSCpnt = SCpnt;
2165
2166 spin_unlock_irq(host_lock);
2167 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2168 SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
2169 ctx2abort, 2 /* 2 second timeout */)
2170 < 0) {
2171
2172 /* The TM request failed and the subsequent FW-reload failed!
2173 * Fatal error case.
2174 */
2175 printk(MYIOC_s_WARN_FMT "Error issuing abort task! (sc=%p)\n",
2176 hd->ioc->name, SCpnt);
2177
2178 /* We must clear our pending flag before clearing our state.
2179 */
2180 hd->tmPending = 0;
2181 hd->tmState = TM_STATE_NONE;
2182
2183 spin_lock_irq(host_lock);
2184
2185 /* Unmap the DMA buffers, if any. */
2186 if (SCpnt->use_sg) {
2187 pci_unmap_sg(ioc->pcidev, (struct scatterlist *) SCpnt->request_buffer,
2188 SCpnt->use_sg, SCpnt->sc_data_direction);
2189 } else if (SCpnt->request_bufflen) {
2190 pci_unmap_single(ioc->pcidev, SCpnt->SCp.dma_handle,
2191 SCpnt->request_bufflen, SCpnt->sc_data_direction);
2192 }
2193 hd->ScsiLookup[scpnt_idx] = NULL;
2194 SCpnt->result = DID_RESET << 16;
2195 SCpnt->scsi_done(SCpnt); /* Issue the command callback */
2196 mptscsih_freeChainBuffers(ioc, scpnt_idx);
2197 mpt_free_msg_frame(ioc, mf);
2198 return FAILED;
2199 }
2200 spin_lock_irq(host_lock);
2201 return SUCCESS;
2202}
2203
2204/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2205/**
2206 * mptscsih_dev_reset - Perform a SCSI TARGET_RESET! new_eh variant
2207 * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
2208 *
2209 * (linux scsi_host_template.eh_dev_reset_handler routine)
2210 *
2211 * Returns SUCCESS or FAILED.
2212 */
2213static int
2214mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
2215{
2216 MPT_SCSI_HOST *hd;
2217 spinlock_t *host_lock = SCpnt->device->host->host_lock;
2218
2219 /* If we can't locate our host adapter structure, return FAILED status.
2220 */
2221 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
2222 dtmprintk((KERN_WARNING MYNAM ": mptscsih_dev_reset: "
2223 "Can't locate host! (sc=%p)\n",
2224 SCpnt));
2225 return FAILED;
2226 }
2227
2228 if (hd->resetPending)
2229 return FAILED;
2230
2231 printk(KERN_WARNING MYNAM ": %s: >> Attempting target reset! (sc=%p)\n",
2232 hd->ioc->name, SCpnt);
2233
2234 spin_unlock_irq(host_lock);
2235 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
2236 SCpnt->device->channel, SCpnt->device->id,
2237 0, 0, 5 /* 5 second timeout */)
2238 < 0){
2239 /* The TM request failed and the subsequent FW-reload failed!
2240 * Fatal error case.
2241 */
2242 printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n",
2243 hd->ioc->name, SCpnt);
2244 hd->tmPending = 0;
2245 hd->tmState = TM_STATE_NONE;
2246 spin_lock_irq(host_lock);
2247 return FAILED;
2248 }
2249 spin_lock_irq(host_lock);
2250 return SUCCESS;
2251
2252}
2253
2254/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2255/**
2256 * mptscsih_bus_reset - Perform a SCSI BUS_RESET! new_eh variant
2257 * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
2258 *
2259 * (linux scsi_host_template.eh_bus_reset_handler routine)
2260 *
2261 * Returns SUCCESS or FAILED.
2262 */
2263static int
2264mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
2265{
2266 MPT_SCSI_HOST *hd;
2267 spinlock_t *host_lock = SCpnt->device->host->host_lock;
2268
2269 /* If we can't locate our host adapter structure, return FAILED status.
2270 */
2271 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
2272 dtmprintk((KERN_WARNING MYNAM ": mptscsih_bus_reset: "
2273 "Can't locate host! (sc=%p)\n",
2274 SCpnt ) );
2275 return FAILED;
2276 }
2277
2278 printk(KERN_WARNING MYNAM ": %s: >> Attempting bus reset! (sc=%p)\n",
2279 hd->ioc->name, SCpnt);
2280
2281 if (hd->timeouts < -1)
2282 hd->timeouts++;
2283
2284 /* We are now ready to execute the task management request. */
2285 spin_unlock_irq(host_lock);
2286 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2287 SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */)
2288 < 0){
2289
2290 /* The TM request failed and the subsequent FW-reload failed!
2291 * Fatal error case.
2292 */
2293 printk(MYIOC_s_WARN_FMT
2294 "Error processing TaskMgmt request (sc=%p)\n",
2295 hd->ioc->name, SCpnt);
2296 hd->tmPending = 0;
2297 hd->tmState = TM_STATE_NONE;
2298 spin_lock_irq(host_lock);
2299 return FAILED;
2300 }
2301 spin_lock_irq(host_lock);
2302 return SUCCESS;
2303}
2304
2305/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2306/**
2307 * mptscsih_host_reset - Perform a SCSI host adapter RESET!
2308 * new_eh variant
2309 * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
2310 *
2311 * (linux scsi_host_template.eh_host_reset_handler routine)
2312 *
2313 * Returns SUCCESS or FAILED.
2314 */
2315static int
2316mptscsih_host_reset(struct scsi_cmnd *SCpnt)
2317{
2318 MPT_SCSI_HOST * hd;
2319 int status = SUCCESS;
2320 spinlock_t *host_lock = SCpnt->device->host->host_lock;
2321
2322 /* If we can't locate the host to reset, then we failed. */
2323 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
2324 dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: "
2325 "Can't locate host! (sc=%p)\n",
2326 SCpnt ) );
2327 return FAILED;
2328 }
2329
2330 printk(KERN_WARNING MYNAM ": %s: >> Attempting host reset! (sc=%p)\n",
2331 hd->ioc->name, SCpnt);
2332
2333 /* If our attempts to reset the host failed, then return a failed
2334 * status. The host will be taken offline by the SCSI mid-layer.
2335 */
2336 spin_unlock_irq(host_lock);
2337 if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0){
2338 status = FAILED;
2339 } else {
2340 /* Make sure TM pending is cleared and TM state is set to
2341 * NONE.
2342 */
2343 hd->tmPending = 0;
2344 hd->tmState = TM_STATE_NONE;
2345 }
2346 spin_lock_irq(host_lock);
2347
2348
2349 dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: "
2350 "Status = %s\n",
2351 (status == SUCCESS) ? "SUCCESS" : "FAILED" ) );
2352
2353 return status;
2354}
2355
2356/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2357/**
2358 * mptscsih_tm_pending_wait - wait for pending task management request to
2359 * complete.
2360 * @hd: Pointer to MPT host structure.
2361 *
2362 * Returns {SUCCESS,FAILED}.
2363 */
2364static int
2365mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
2366{
2367 unsigned long flags;
2368 int loop_count = 4 * 10; /* Wait 10 seconds */
2369 int status = FAILED;
2370
2371 do {
2372 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
2373 if (hd->tmState == TM_STATE_NONE) {
2374 hd->tmState = TM_STATE_IN_PROGRESS;
2375 hd->tmPending = 1;
2376 status = SUCCESS;
2377 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
2378 break;
2379 }
2380 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
2381 msleep(250);
2382 } while (--loop_count);
2383
2384 return status;
2385}
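/* Timing note: loop_count = 4 * 10 iterations with msleep(250) gives the
 * roughly 10 second window named in the comment above; the loop exits early
 * once tmState is claimed (TM_STATE_IN_PROGRESS) under FreeQlock.
 */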
2386
2387/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2388/**
2389 * mptscsih_tm_wait_for_completion - wait for completion of TM task
2390 * @hd: Pointer to MPT host structure.
2391 *
2392 * Returns {SUCCESS,FAILED}.
2393 */
2394static int
2395mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
2396{
2397 unsigned long flags;
2398 int loop_count = 4 * timeout;
2399 int status = FAILED;
2400
2401 do {
2402 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
2403 if(hd->tmPending == 0) {
2404 status = SUCCESS;
2405 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
2406 break;
2407 }
2408 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
2409 msleep_interruptible(250);
2410 } while (--loop_count);
2411
2412 return status;
2413}
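/* Timing note: 4 * timeout iterations of ~250 ms sleeps approximate the
 * requested timeout in seconds; e.g. the abort path's 2 second timeout
 * polls tmPending at most 8 times before giving up.
 */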
2414
2415/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2416/**
2417 * mptscsih_taskmgmt_complete - Registered with Fusion MPT base driver
2418 * @ioc: Pointer to MPT_ADAPTER structure
2419 * @mf: Pointer to SCSI task mgmt request frame
2420 * @mr: Pointer to SCSI task mgmt reply frame
2421 *
2422 * This routine is called from mptbase.c::mpt_interrupt() at the completion
2423 * of any SCSI task management request.
2424 * This routine is registered with the MPT (base) driver at driver
2425 * load/init time via the mpt_register() API call.
2426 *
2427 * Returns 1 indicating alloc'd request frame ptr should be freed.
2428 */
2429static int
2430mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
2431{
2432 SCSITaskMgmtReply_t *pScsiTmReply;
2433 SCSITaskMgmt_t *pScsiTmReq;
2434 MPT_SCSI_HOST *hd;
2435 unsigned long flags;
2436 u16 iocstatus;
2437 u8 tmType;
2438
2439 dtmprintk((MYIOC_s_WARN_FMT "TaskMgmt completed (mf=%p,mr=%p)\n",
2440 ioc->name, mf, mr));
2441 if (ioc->sh) {
2442 /* Depending on the thread, a timer is activated for
2443 * the TM request. Delete this timer on completion of TM.
2444 * Decrement count of outstanding TM requests.
2445 */
2446 hd = (MPT_SCSI_HOST *)ioc->sh->hostdata;
2447 } else {
2448 dtmprintk((MYIOC_s_WARN_FMT "TaskMgmt Complete: NULL Scsi Host Ptr\n",
2449 ioc->name));
2450 return 1;
2451 }
2452
2453 if (mr == NULL) {
2454 dtmprintk((MYIOC_s_WARN_FMT "ERROR! TaskMgmt Reply: NULL Request %p\n",
2455 ioc->name, mf));
2456 return 1;
2457 } else {
2458 pScsiTmReply = (SCSITaskMgmtReply_t*)mr;
2459 pScsiTmReq = (SCSITaskMgmt_t*)mf;
2460
2461 /* Figure out if this was ABORT_TASK, TARGET_RESET, or BUS_RESET! */
2462 tmType = pScsiTmReq->TaskType;
2463
2464 dtmprintk((MYIOC_s_WARN_FMT " TaskType = %d, TerminationCount=%d\n",
2465 ioc->name, tmType, le32_to_cpu(pScsiTmReply->TerminationCount)));
2466 DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply);
2467
2468 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
2469 dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n",
2470 ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo)));
2471 /* Error? (anything non-zero?) */
2472 if (iocstatus) {
2473
2474 /* clear flags and continue.
2475 */
2476 if (tmType == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
2477 hd->abortSCpnt = NULL;
2478
2479 /* If an internal command is present
2480 * or the TM failed - reload the FW.
2481 * FC FW may respond FAILED to an ABORT
2482 */
2483 if (tmType == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
2484 if ((hd->cmdPtr) ||
2485 (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED)) {
2486 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) {
2487 printk((KERN_WARNING
2488 " Firmware Reload FAILED!!\n"));
2489 }
2490 }
2491 }
2492 } else {
2493 dtmprintk((MYIOC_s_WARN_FMT " TaskMgmt SUCCESS\n", ioc->name));
2494
2495 hd->abortSCpnt = NULL;
2496
2497 }
2498 }
2499
2500 spin_lock_irqsave(&ioc->FreeQlock, flags);
2501 hd->tmPending = 0;
2502 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2503 hd->tmState = TM_STATE_NONE;
2504
2505 return 1;
2506}
2507
2508/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2509/*
2510 * This is anyone's guess, quite frankly (a conventional heuristic CHS geometry for the BIOS).
2511 */
2512static int
2513mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
2514 sector_t capacity, int geom[])
2515{
2516 int heads;
2517 int sectors;
2518 sector_t cylinders;
2519 ulong dummy;
2520
2521 heads = 64;
2522 sectors = 32;
2523
2524 dummy = heads * sectors;
2525 cylinders = capacity;
2526 sector_div(cylinders,dummy);
2527
2528 /*
2529 * Handle extended translation size for logical drives
2530 * > 1Gb
2531 */
2532 if ((ulong)capacity >= 0x200000) {
2533 heads = 255;
2534 sectors = 63;
2535 dummy = heads * sectors;
2536 cylinders = capacity;
2537 sector_div(cylinders,dummy);
2538 }
2539
2540 /* return result */
2541 geom[0] = heads;
2542 geom[1] = sectors;
2543 geom[2] = cylinders;
2544
2545 dprintk((KERN_NOTICE
2546 ": bios_param: Id=%i Lun=%i Channel=%i CHS=%i/%i/%i\n",
2547 sdev->id, sdev->lun,sdev->channel,(int)cylinders,heads,sectors));
2548
2549 return 0;
2550}
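/* Worked example for the mapping above (512-byte sectors assumed):
 * 0x200000 sectors is 1 GiB. Below that, 64 heads * 32 sectors = 2048
 * sectors/cylinder, so a 524288-sector (256 MiB) disk reports 256 cylinders.
 * At or above 1 GiB, 255 * 63 = 16065 sectors/cylinder, so an
 * 8388608-sector (4 GiB) disk reports 8388608 / 16065 = ~522 cylinders.
 */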
2551
2552/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2553/*
2554 * OS entry point to allow the host driver to allocate memory
2555 * for each scsi device. Called once per device during the bus scan.
2556 * Return non-zero if allocation fails.
2557 * Init memory once per id (not LUN).
2558 */
2559static int
2560mptscsih_slave_alloc(struct scsi_device *device)
2561{
2562 struct Scsi_Host *host = device->host;
2563 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
2564 VirtDevice *vdev;
2565 uint target = device->id;
2566
2567 if (hd == NULL)
2568 return -ENODEV;
2569
2570 if ((vdev = hd->Targets[target]) != NULL)
2571 goto out;
2572
2573 vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
2574 if (!vdev) {
2575 printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
2576 hd->ioc->name, sizeof(VirtDevice));
2577 return -ENOMEM;
2578 }
2579
2580 memset(vdev, 0, sizeof(VirtDevice));
2581 vdev->tflags = MPT_TARGET_FLAGS_Q_YES;
2582 vdev->ioc_id = hd->ioc->id;
2583 vdev->target_id = device->id;
2584 vdev->bus_id = device->channel;
2585 vdev->raidVolume = 0;
2586 hd->Targets[device->id] = vdev;
2587 if (hd->ioc->bus_type == SCSI) {
2588 if (hd->ioc->spi_data.isRaid & (1 << device->id)) {
2589 vdev->raidVolume = 1;
2590 ddvtprintk((KERN_INFO
2591 "RAID Volume @ id %d\n", device->id));
2592 }
2593 } else {
2594 vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
2595 }
2596
2597 out:
2598 vdev->num_luns++;
2599 return 0;
2600}
2601
2602static int mptscsih_is_raid_volume(MPT_SCSI_HOST *hd, uint id)
2603{
2604 int i;
2605
2606 if (!hd->ioc->spi_data.isRaid || !hd->ioc->spi_data.pIocPg3)
2607 return 0;
2608
2609 for (i = 0; i < hd->ioc->spi_data.pIocPg3->NumPhysDisks; i++) {
2610 if (id == hd->ioc->spi_data.pIocPg3->PhysDisk[i].PhysDiskID)
2611 return 1;
2612 }
2613
2614 return 0;
2615}
2616
2617/*
2618 * OS entry point to allow for host driver to free allocated memory
2619 * Called if no device present or device being unloaded
2620 */
2621static void
2622mptscsih_slave_destroy(struct scsi_device *device)
2623{
2624 struct Scsi_Host *host = device->host;
2625 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
2626 VirtDevice *vdev;
2627 uint target = device->id;
2628 uint lun = device->lun;
2629
2630 if (hd == NULL)
2631 return;
2632
2633 mptscsih_search_running_cmds(hd, target, lun);
2634
2635 vdev = hd->Targets[target];
2636 vdev->luns[0] &= ~(1 << lun);
2637 if (--vdev->num_luns)
2638 return;
2639
2640 kfree(hd->Targets[target]);
2641 hd->Targets[target] = NULL;
2642
2643 if (hd->ioc->bus_type == SCSI) {
2644 if (mptscsih_is_raid_volume(hd, target)) {
2645 hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
2646 } else {
2647 hd->ioc->spi_data.dvStatus[target] =
2648 MPT_SCSICFG_NEGOTIATE;
2649
2650 if (!hd->negoNvram) {
2651 hd->ioc->spi_data.dvStatus[target] |=
2652 MPT_SCSICFG_DV_NOT_DONE;
2653 }
2654 }
2655 }
2656}
2657
2658static void
2659mptscsih_set_queue_depth(struct scsi_device *device, MPT_SCSI_HOST *hd,
2660 VirtDevice *pTarget, int qdepth)
2661{
2662 int max_depth;
2663 int tagged;
2664
2665 if (hd->ioc->bus_type == SCSI) {
2666 if (pTarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) {
2667 if (!(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES))
2668 max_depth = 1;
2669 else if (((pTarget->inq_data[0] & 0x1f) == 0x00) &&
2670 (pTarget->minSyncFactor <= MPT_ULTRA160 ))
2671 max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
2672 else
2673 max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
2674 } else {
2675 /* error case - No Inq. Data */
2676 max_depth = 1;
2677 }
2678 } else
2679 max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
2680
2681 if (qdepth > max_depth)
2682 qdepth = max_depth;
2683 if (qdepth == 1)
2684 tagged = 0;
2685 else
2686 tagged = MSG_SIMPLE_TAG;
2687
2688 scsi_adjust_queue_depth(device, tagged, qdepth);
2689}
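/* Illustrative outcomes of the policy above: a SPI disk with valid inquiry
 * data, MPT_TARGET_FLAGS_Q_YES and minSyncFactor <= MPT_ULTRA160 is allowed
 * MPT_SCSI_CMD_PER_DEV_HIGH with MSG_SIMPLE_TAG; a device without queue-tag
 * support (or with no inquiry data) is clamped to an untagged depth of 1;
 * the non-SPI (e.g. FC) case allows up to MPT_SCSI_CMD_PER_DEV_HIGH.
 */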
2690
2691
2692/*
2693 * OS entry point to adjust the queue_depths on a per-device basis.
2694 * Called once per device during the bus scan. Use it to force the queue_depth
2695 * member to 1 if a device does not support Q tags.
2696 * Return non-zero if it fails.
2697 */
2698static int
2699mptscsih_slave_configure(struct scsi_device *device)
2700{
2701 struct Scsi_Host *sh = device->host;
2702 VirtDevice *pTarget;
2703 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sh->hostdata;
2704
2705 if ((hd == NULL) || (hd->Targets == NULL)) {
2706 return 0;
2707 }
2708
2709 dsprintk((MYIOC_s_INFO_FMT
2710 "device @ %p, id=%d, LUN=%d, channel=%d\n",
2711 hd->ioc->name, device, device->id, device->lun, device->channel));
2712 dsprintk((MYIOC_s_INFO_FMT
2713 "sdtr %d wdtr %d ppr %d inq length=%d\n",
2714 hd->ioc->name, device->sdtr, device->wdtr,
2715 device->ppr, device->inquiry_len));
2716
2717 if (device->id > sh->max_id) {
2718 /* error case, should never happen */
2719 scsi_adjust_queue_depth(device, 0, 1);
2720 goto slave_configure_exit;
2721 }
2722
2723 pTarget = hd->Targets[device->id];
2724
2725 if (pTarget == NULL) {
2726 /* Driver doesn't know about this device.
2727 * Kernel may generate a "Dummy Lun 0" which
2728 * may become a real Lun if a
2729 * "scsi add-single-device" command is executed
2730 * while the driver is active (hot-plug a
2731 * device). LSI Raid controllers need
2732 * queue_depth set to DEV_HIGH for this reason.
2733 */
2734 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
2735 MPT_SCSI_CMD_PER_DEV_HIGH);
2736 goto slave_configure_exit;
2737 }
2738
2739 mptscsih_initTarget(hd, device->channel, device->id, device->lun,
2740 device->inquiry, device->inquiry_len );
2741 mptscsih_set_queue_depth(device, hd, pTarget, MPT_SCSI_CMD_PER_DEV_HIGH);
2742
2743 dsprintk((MYIOC_s_INFO_FMT
2744 "Queue depth=%d, tflags=%x\n",
2745 hd->ioc->name, device->queue_depth, pTarget->tflags));
2746
2747 dsprintk((MYIOC_s_INFO_FMT
2748 "negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
2749 hd->ioc->name, pTarget->negoFlags, pTarget->maxOffset, pTarget->minSyncFactor));
2750
2751slave_configure_exit:
2752
2753 dsprintk((MYIOC_s_INFO_FMT
2754 "tagged %d, simple %d, ordered %d\n",
2755 hd->ioc->name,device->tagged_supported, device->simple_tags,
2756 device->ordered_tags));
2757
2758 return 0;
2759}
2760
2761static ssize_t
2762mptscsih_store_queue_depth(struct device *dev, const char *buf, size_t count)
2763{
2764 int depth;
2765 struct scsi_device *sdev = to_scsi_device(dev);
2766 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) sdev->host->hostdata;
2767 VirtDevice *pTarget;
2768
2769 depth = simple_strtoul(buf, NULL, 0);
2770 if (depth == 0)
2771 return -EINVAL;
2772 pTarget = hd->Targets[sdev->id];
2773 if (pTarget == NULL)
2774 return -EINVAL;
2775 mptscsih_set_queue_depth(sdev, (MPT_SCSI_HOST *) sdev->host->hostdata,
2776 pTarget, depth);
2777 return count;
2778}
2779
2780/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2781/*
2782 * Private routines...
2783 */
2784
2785/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2786/* Utility function to copy sense data from the IOC's sense buffer pool
2787 * into the scsi_cmnd sense buffer, and log SMART sense data if enabled.
2788 *
2789 */
2790static void
2791copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply)
2792{
2793 VirtDevice *target;
2794 SCSIIORequest_t *pReq;
2795 u32 sense_count = le32_to_cpu(pScsiReply->SenseCount);
2796 int index;
2797
2798 /* Get target structure
2799 */
2800 pReq = (SCSIIORequest_t *) mf;
2801 index = (int) pReq->TargetID;
2802 target = hd->Targets[index];
2803
2804 if (sense_count) {
2805 u8 *sense_data;
2806 int req_index;
2807
2808 /* Copy the sense received into the scsi command block. */
2809 req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
2810 sense_data = ((u8 *)hd->ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
2811 memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
2812
2813 /* Log SMART data (asc = 0x5D, non-IM case only) if required.
2814 */
2815 if ((hd->ioc->events) && (hd->ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) {
2816 if ((sense_data[12] == 0x5D) && (target->raidVolume == 0)) {
2817 int idx;
2818 MPT_ADAPTER *ioc = hd->ioc;
2819
2820 idx = ioc->eventContext % ioc->eventLogSize;
2821 ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
2822 ioc->events[idx].eventContext = ioc->eventContext;
2823
2824 ioc->events[idx].data[0] = (pReq->LUN[1] << 24) |
2825 (MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) |
2826 (pReq->Bus << 8) | pReq->TargetID;
2827
2828 ioc->events[idx].data[1] = (sense_data[13] << 8) | sense_data[12];
2829
2830 ioc->eventContext++;
2831 }
2832 }
2833 } else {
2834 dprintk((MYIOC_s_INFO_FMT "Hmmm... SenseData len=0! (?)\n",
2835 hd->ioc->name));
2836 }
2837}
2838
2839static int
2840SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc)
2841{
2842 MPT_SCSI_HOST *hd;
2843 int i;
2844
2845 hd = (MPT_SCSI_HOST *) sc->device->host->hostdata;
2846
2847 for (i = 0; i < hd->ioc->req_depth; i++) {
2848 if (hd->ScsiLookup[i] == sc) {
2849 return i;
2850 }
2851 }
2852
2853 return -1;
2854}
2855
2856/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2857static int
2858mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2859{
2860 MPT_SCSI_HOST *hd;
2861 unsigned long flags;
2862
2863 dtmprintk((KERN_WARNING MYNAM
2864 ": IOC %s_reset routed to SCSI host driver!\n",
2865 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
2866 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
2867
2868 /* If a FW reload request arrives after base installed but
2869 * before all scsi hosts have been attached, then an alt_ioc
2870 * may have a NULL sh pointer.
2871 */
2872 if ((ioc->sh == NULL) || (ioc->sh->hostdata == NULL))
2873 return 0;
2874 else
2875 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2876
2877 if (reset_phase == MPT_IOC_SETUP_RESET) {
2878 dtmprintk((MYIOC_s_WARN_FMT "Setup-Diag Reset\n", ioc->name));
2879
2880 /* Clean Up:
2881 * 1. Set Hard Reset Pending Flag
2882 * All new commands go to doneQ
2883 */
2884 hd->resetPending = 1;
2885
2886 } else if (reset_phase == MPT_IOC_PRE_RESET) {
2887 dtmprintk((MYIOC_s_WARN_FMT "Pre-Diag Reset\n", ioc->name));
2888
2889 /* 2. Flush running commands
2890 * Clean ScsiLookup (and associated memory)
2891 * AND clean mytaskQ
2892 */
2893
2894 /* 2b. Reply to OS all known outstanding I/O commands.
2895 */
2896 mptscsih_flush_running_cmds(hd);
2897
2898 /* 2c. If there was an internal command that
2899 * has not completed, configuration or io request,
2900 * free these resources.
2901 */
2902 if (hd->cmdPtr) {
2903 del_timer(&hd->timer);
2904 mpt_free_msg_frame(ioc, hd->cmdPtr);
2905 }
2906
2907 dtmprintk((MYIOC_s_WARN_FMT "Pre-Reset complete.\n", ioc->name));
2908
2909 } else {
2910 dtmprintk((MYIOC_s_WARN_FMT "Post-Diag Reset\n", ioc->name));
2911
2912 /* Once a FW reload begins, all new OS commands are
2913 * redirected to the doneQ w/ a reset status.
2914 * Init all control structures.
2915 */
2916
2917 /* ScsiLookup initialization
2918 */
2919 {
2920 int ii;
2921 for (ii=0; ii < hd->ioc->req_depth; ii++)
2922 hd->ScsiLookup[ii] = NULL;
2923 }
2924
2925 /* 2. Chain Buffer initialization
2926 */
2927
2928 /* 4. Renegotiate to all devices, if SCSI
2929 */
2930 if (ioc->bus_type == SCSI) {
2931 dnegoprintk(("writeSDP1: ALL_IDS USE_NVRAM\n"));
2932 mptscsih_writeSDP1(hd, 0, 0, MPT_SCSICFG_ALL_IDS | MPT_SCSICFG_USE_NVRAM);
2933 }
2934
2935 /* 5. Enable new commands to be posted
2936 */
2937 spin_lock_irqsave(&ioc->FreeQlock, flags);
2938 hd->tmPending = 0;
2939 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
2940 hd->resetPending = 0;
2941 hd->tmState = TM_STATE_NONE;
2942
2943 /* 6. If there was an internal command,
2944 * wake this process up.
2945 */
2946 if (hd->cmdPtr) {
2947 /*
2948 * Wake up the original calling thread
2949 */
2950 hd->pLocal = &hd->localReply;
2951 hd->pLocal->completion = MPT_SCANDV_DID_RESET;
2952 scandv_wait_done = 1;
2953 wake_up(&scandv_waitq);
2954 hd->cmdPtr = NULL;
2955 }
2956
2957 /* 7. Set flag to force DV and re-read IOC Page 3
2958 */
2959 if (ioc->bus_type == SCSI) {
2960 ioc->spi_data.forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
2961 ddvtprintk(("Set reload IOC Pg3 Flag\n"));
2962 }
2963
2964 dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name));
2965
2966 }
2967
2968 return 1; /* currently means nothing really */
2969}
2970
2971/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2972static int
2973mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2974{
2975 MPT_SCSI_HOST *hd;
2976 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
2977
2978 devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
2979 ioc->name, event));
2980
2981 switch (event) {
2982 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
2983 /* FIXME! */
2984 break;
2985 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
2986 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
2987 hd = NULL;
2988 if (ioc->sh) {
2989 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2990 if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
2991 hd->soft_resets++;
2992 }
2993 break;
2994 case MPI_EVENT_LOGOUT: /* 09 */
2995 /* FIXME! */
2996 break;
2997
2998 /*
2999 * CHECKME! Don't think we need to do
3000 * anything for these, but...
3001 */
3002 case MPI_EVENT_RESCAN: /* 06 */
3003 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
3004 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
3005 /*
3006 * CHECKME! Falling thru...
3007 */
3008 break;
3009
3010 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
3011#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
3012 /* negoNvram is set to 0 if DV is enabled and to USE_NVRAM
3013 * if DV is disabled. Need to check for target mode.
3014 */
3015 hd = NULL;
3016 if (ioc->sh)
3017 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
3018
3019 if (hd && (ioc->bus_type == SCSI) && (hd->negoNvram == 0)) {
3020 ScsiCfgData *pSpi;
3021 Ioc3PhysDisk_t *pPDisk;
3022 int numPDisk;
3023 u8 reason;
3024 u8 physDiskNum;
3025
3026 reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
3027 if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
3028 /* New or replaced disk.
3029 * Set DV flag and schedule DV.
3030 */
3031 pSpi = &ioc->spi_data;
3032 physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
3033 ddvtprintk(("DV requested for phys disk id %d\n", physDiskNum));
3034 if (pSpi->pIocPg3) {
3035 pPDisk = pSpi->pIocPg3->PhysDisk;
3036 numPDisk = pSpi->pIocPg3->NumPhysDisks;
3037
3038 while (numPDisk) {
3039 if (physDiskNum == pPDisk->PhysDiskNum) {
3040 pSpi->dvStatus[pPDisk->PhysDiskID] = (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
3041 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
3042 ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
3043 break;
3044 }
3045 pPDisk++;
3046 numPDisk--;
3047 }
3048
3049 if (numPDisk == 0) {
3050 /* The physical disk that needs DV was not found
3051 * in the stored IOC Page 3. The driver must reload
3052 * this page. DV routine will set the NEED_DV flag for
3053 * all phys disks that have DV_NOT_DONE set.
3054 */
3055 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
3056 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n", physDiskNum));
3057 }
3058 }
3059 }
3060 }
3061#endif
3062
3063#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
3064 printk("Raid Event RF: ");
3065 {
3066 u32 *m = (u32 *)pEvReply;
3067 int ii;
3068 int n = (int)pEvReply->MsgLength;
3069 for (ii=6; ii < n; ii++)
3070 printk(" %08x", le32_to_cpu(m[ii]));
3071 printk("\n");
3072 }
3073#endif
3074 break;
3075
3076 case MPI_EVENT_NONE: /* 00 */
3077 case MPI_EVENT_LOG_DATA: /* 01 */
3078 case MPI_EVENT_STATE_CHANGE: /* 02 */
3079 case MPI_EVENT_EVENT_CHANGE: /* 0A */
3080 default:
3081 dprintk((KERN_INFO " Ignoring event (=%02Xh)\n", event));
3082 break;
3083 }
3084
3085 return 1; /* currently means nothing really */
3086}
3087
3088static struct device_attribute mptscsih_queue_depth_attr = {
3089 .attr = {
3090 .name = "queue_depth",
3091 .mode = S_IWUSR,
3092 },
3093 .store = mptscsih_store_queue_depth,
3094};
3095
3096static struct device_attribute *mptscsih_dev_attrs[] = {
3097 &mptscsih_queue_depth_attr,
3098 NULL,
3099};
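/* Usage sketch for the write-only attribute above (the exact sysfs path is
 * an assumption and depends on the kernel's sysfs layout, e.g. something
 * like /sys/class/scsi_device/<h:c:t:l>/device/queue_depth):
 *
 *   echo 16 > .../queue_depth
 *
 * The value is parsed with simple_strtoul(); 0 or an unknown target is
 * rejected with -EINVAL, otherwise mptscsih_set_queue_depth() re-clamps it.
 */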
3100
3101static struct scsi_host_template driver_template = {
3102 .proc_name = "mptscsih",
3103 .proc_info = mptscsih_proc_info,
3104 .name = "MPT SCSI Host",
3105 .info = mptscsih_info,
3106 .queuecommand = mptscsih_qcmd,
3107 .slave_alloc = mptscsih_slave_alloc,
3108 .slave_configure = mptscsih_slave_configure,
3109 .slave_destroy = mptscsih_slave_destroy,
3110 .eh_abort_handler = mptscsih_abort,
3111 .eh_device_reset_handler = mptscsih_dev_reset,
3112 .eh_bus_reset_handler = mptscsih_bus_reset,
3113 .eh_host_reset_handler = mptscsih_host_reset,
3114 .bios_param = mptscsih_bios_param,
3115 .can_queue = MPT_SCSI_CAN_QUEUE,
3116 .this_id = -1,
3117 .sg_tablesize = MPT_SCSI_SG_DEPTH,
3118 .max_sectors = 8192,
3119 .cmd_per_lun = 7,
3120 .use_clustering = ENABLE_CLUSTERING,
3121 .sdev_attrs = mptscsih_dev_attrs,
3122};
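/* Sizing note (arithmetic only, 512-byte sectors assumed): max_sectors of
 * 8192 allows transfers up to 8192 * 512 B = 4 MiB per command, split over
 * at most MPT_SCSI_SG_DEPTH scatter-gather entries; cmd_per_lun = 7 is only
 * the default depth before slave_configure() adjusts it per device.
 */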
3123
3124/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3125/*
3126 * mptscsih_initTarget - Target, LUN alloc/free functionality.
3127 * @hd: Pointer to MPT_SCSI_HOST structure
3128 * @bus_id: Bus number (?)
3129 * @target_id: SCSI target id
3130 * @lun: SCSI LUN id
3131 * @data: Pointer to data
3132 * @dlen: Number of INQUIRY bytes
3133 *
3134 * NOTE: It's only SAFE to call this routine if data points to
3135 * sane & valid STANDARD INQUIRY data!
3136 *
3137 * Allocate and initialize memory for this target.
3138 * Save inquiry data.
3139 *
3140 */
3141static void
3142mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen)
3143{
3144 int indexed_lun, lun_index;
3145 VirtDevice *vdev;
3146 ScsiCfgData *pSpi;
3147 char data_56;
3148
3149 dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
3150 hd->ioc->name, bus_id, target_id, lun, hd));
3151
3152 /*
3153 * If the peripheral qualifier filter is enabled and the target reports a 0x1
3154 * (i.e. the target is capable of supporting the specified peripheral device type
3155 * on this logical unit; however, the physical device is not currently connected
3156 * to this logical unit), it is converted to a 0x3 (i.e. the target is not
3157 * capable of supporting a physical device on this logical unit). This works
3158 * around a bug in the mid-layer in some distributions in which the mid-layer will
3159 * continue to try to communicate with the LUN and eventually create a dummy LUN.
3160 */
3161 if (mpt_pq_filter && dlen && (data[0] & 0xE0))
3162 data[0] |= 0x40;
3163
3164 /* Is LUN supported? If so, upper 2 bits will be 0
3165 * in first byte of inquiry data.
3166 */
3167 if (data[0] & 0xe0)
3168 return;
3169
3170 if ((vdev = hd->Targets[target_id]) == NULL) {
3171 return;
3172 }
3173
3174 lun_index = (lun >> 5); /* 32 luns per lun_index */
3175 indexed_lun = (lun % 32);
3176 vdev->luns[lun_index] |= (1 << indexed_lun);
3177
3178 if (hd->ioc->bus_type == SCSI) {
3179 if ((data[0] == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
3180 /* Treat all Processors as SAF-TE if
3181 * command line option is set */
3182 vdev->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
3183 mptscsih_writeIOCPage4(hd, target_id, bus_id);
3184 }else if ((data[0] == TYPE_PROCESSOR) &&
3185 !(vdev->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) {
3186 if ( dlen > 49 ) {
3187 vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
3188 if ( data[44] == 'S' &&
3189 data[45] == 'A' &&
3190 data[46] == 'F' &&
3191 data[47] == '-' &&
3192 data[48] == 'T' &&
3193 data[49] == 'E' ) {
3194 vdev->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
3195 mptscsih_writeIOCPage4(hd, target_id, bus_id);
3196 }
3197 }
3198 }
3199 if (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) {
3200 if ( dlen > 8 ) {
3201 memcpy (vdev->inq_data, data, 8);
3202 } else {
3203 memcpy (vdev->inq_data, data, dlen);
3204 }
3205
3206 /* If have not done DV, set the DV flag.
3207 */
3208 pSpi = &hd->ioc->spi_data;
3209 if ((data[0] == TYPE_TAPE) || (data[0] == TYPE_PROCESSOR)) {
3210 if (pSpi->dvStatus[target_id] & MPT_SCSICFG_DV_NOT_DONE)
3211 pSpi->dvStatus[target_id] |= MPT_SCSICFG_NEED_DV;
3212 }
3213
3214 vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
3215
3216
3217 data_56 = 0x0F; /* Default to full capabilities if Inq data length is < 57 */
3218 if (dlen > 56) {
3219 if ( (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_56))) {
3220 /* Update the target capabilities
3221 */
3222 data_56 = data[56];
3223 vdev->tflags |= MPT_TARGET_FLAGS_VALID_56;
3224 }
3225 }
3226 mptscsih_setTargetNegoParms(hd, vdev, data_56);
3227 } else {
3228 /* Initial Inquiry may not request enough data bytes to
3229 * obtain byte 57. DV will; if target doesn't return
3230 * at least 57 bytes, data[56] will be zero. */
3231 if (dlen > 56) {
3232 if ( (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_56))) {
3233 /* Update the target capabilities
3234 */
3235 data_56 = data[56];
3236 vdev->tflags |= MPT_TARGET_FLAGS_VALID_56;
3237 mptscsih_setTargetNegoParms(hd, vdev, data_56);
3238 }
3239 }
3240 }
3241 }
3242}
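/* Illustrative LUN bookkeeping from the code above: for lun = 37,
 * lun_index = 37 >> 5 = 1 and indexed_lun = 37 % 32 = 5, so bit 5 of
 * vdev->luns[1] is set; each luns[] word therefore tracks 32 LUNs.
 */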
3243
3244/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3245/*
3246 * Update the target negotiation parameters based on the
3247 * Inquiry data, adapter capabilities, and NVRAM settings.
3248 *
3249 */
3250static void
3251mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
3252{
3253 ScsiCfgData *pspi_data = &hd->ioc->spi_data;
3254 int id = (int) target->target_id;
3255 int nvram;
3256 VirtDevice *vdev;
3257 int ii;
3258 u8 width = MPT_NARROW;
3259 u8 factor = MPT_ASYNC;
3260 u8 offset = 0;
3261 u8 version, nfactor;
3262 u8 noQas = 1;
3263
3264 target->negoFlags = pspi_data->noQas;
3265
3266 /* noQas == 0 => device supports QAS. Need byte 56 of Inq to determine
3267 * support. If available, default QAS to off and allow enabling.
3268 * If not available, default QAS to on, turn off for non-disks.
3269 */
3270
3271 /* Set flags based on Inquiry data
3272 */
3273 version = target->inq_data[2] & 0x07;
3274 if (version < 2) {
3275 width = 0;
3276 factor = MPT_ULTRA2;
3277 offset = pspi_data->maxSyncOffset;
3278 target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
3279 } else {
3280 if (target->inq_data[7] & 0x20) {
3281 width = 1;
3282 }
3283
3284 if (target->inq_data[7] & 0x10) {
3285 factor = pspi_data->minSyncFactor;
3286 if (target->tflags & MPT_TARGET_FLAGS_VALID_56) {
3287 /* bits 2 & 3 show Clocking support */
3288 if ((byte56 & 0x0C) == 0)
3289 factor = MPT_ULTRA2;
3290 else {
3291 if ((byte56 & 0x03) == 0)
3292 factor = MPT_ULTRA160;
3293 else {
3294 factor = MPT_ULTRA320;
3295 if (byte56 & 0x02)
3296 {
3297 ddvtprintk((KERN_INFO "Enabling QAS due to byte56=%02x on id=%d!\n", byte56, id));
3298 noQas = 0;
3299 }
3300 if (target->inq_data[0] == TYPE_TAPE) {
3301 if (byte56 & 0x01)
3302 target->negoFlags |= MPT_TAPE_NEGO_IDP;
3303 }
3304 }
3305 }
3306 } else {
3307 ddvtprintk((KERN_INFO "Enabling QAS on id=%d due to ~TARGET_FLAGS_VALID_56!\n", id));
3308 noQas = 0;
3309 }
3310
3311 offset = pspi_data->maxSyncOffset;
3312
3313 /* If RAID, never disable QAS
3314 * else if non RAID, do not disable
3315 * QAS if bit 1 is set
3316 * bit 1 QAS support, non-raid only
3317 * bit 0 IU support
3318 */
3319 if (target->raidVolume == 1) {
3320 noQas = 0;
3321 }
3322 } else {
3323 factor = MPT_ASYNC;
3324 offset = 0;
3325 }
3326 }
3327
3328 if ( (target->inq_data[7] & 0x02) == 0) {
3329 target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
3330 }
3331
3332 /* Update tflags based on NVRAM settings. (SCSI only)
3333 */
3334 if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
3335 nvram = pspi_data->nvram[id];
3336 nfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8;
3337
3338 if (width)
3339 width = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
3340
3341 if (offset > 0) {
3342 /* Ensure factor is set to the
3343 * maximum of: adapter, nvram, inquiry
3344 */
3345 if (nfactor) {
3346 if (nfactor < pspi_data->minSyncFactor )
3347 nfactor = pspi_data->minSyncFactor;
3348
3349 factor = max(factor, nfactor);
3350 if (factor == MPT_ASYNC)
3351 offset = 0;
3352 } else {
3353 offset = 0;
3354 factor = MPT_ASYNC;
3355 }
3356 } else {
3357 factor = MPT_ASYNC;
3358 }
3359 }
3360
3361 /* Make sure data is consistent
3362 */
3363 if ((!width) && (factor < MPT_ULTRA2)) {
3364 factor = MPT_ULTRA2;
3365 }
3366
3367 /* Save the data to the target structure.
3368 */
3369 target->minSyncFactor = factor;
3370 target->maxOffset = offset;
3371 target->maxWidth = width;
3372
3373 target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
3374
3375 /* Disable unused features.
3376 */
3377 if (!width)
3378 target->negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
3379
3380 if (!offset)
3381 target->negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
3382
3383 if ( factor > MPT_ULTRA320 )
3384 noQas = 0;
3385
3386 /* GEM, processor WORKAROUND
3387 */
3388 if ((target->inq_data[0] == TYPE_PROCESSOR) || (target->inq_data[0] > 0x08)) {
3389 target->negoFlags |= (MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC);
3390 pspi_data->dvStatus[id] |= MPT_SCSICFG_BLK_NEGO;
3391 } else {
3392 if (noQas && (pspi_data->noQas == 0)) {
3393 pspi_data->noQas |= MPT_TARGET_NO_NEGO_QAS;
3394 target->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
3395
3396 /* Disable QAS in a mixed configuration case
3397 */
3398
3399 ddvtprintk((KERN_INFO "Disabling QAS due to noQas=%02x on id=%d!\n", noQas, id));
3400 for (ii = 0; ii < id; ii++) {
3401 if ( (vdev = hd->Targets[ii]) ) {
3402 vdev->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
3403 mptscsih_writeSDP1(hd, 0, ii, vdev->negoFlags);
3404 }
3405 }
3406 }
3407 }
3408
3409 /* Write SDP1 on this I/O to this target */
3410 if (pspi_data->dvStatus[id] & MPT_SCSICFG_NEGOTIATE) {
3411 ddvtprintk((KERN_INFO "MPT_SCSICFG_NEGOTIATE on id=%d!\n", id));
3412 mptscsih_writeSDP1(hd, 0, id, hd->negoNvram);
3413 pspi_data->dvStatus[id] &= ~MPT_SCSICFG_NEGOTIATE;
3414 } else if (pspi_data->dvStatus[id] & MPT_SCSICFG_BLK_NEGO) {
3415 ddvtprintk((KERN_INFO "MPT_SCSICFG_BLK_NEGO on id=%d!\n", id));
3416 mptscsih_writeSDP1(hd, 0, id, MPT_SCSICFG_BLK_NEGO);
3417 pspi_data->dvStatus[id] &= ~MPT_SCSICFG_BLK_NEGO;
3418 }
3419}
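/* Worked decode of Inquiry byte 56 as used above (the byte value is
 * illustrative): byte56 = 0x0E has bits 2-3 (clocking) non-zero and bits
 * 0-1 non-zero, so factor becomes MPT_ULTRA320; bit 1 (0x02) is set, so QAS
 * stays enabled (noQas = 0); bit 0 is clear, so no IDP negotiation for tapes.
 */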
3420
3421/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3422/* If DV is disabled (negoNvram set to USE_NVRAM) or if not LUN 0, return.
3423 * Else set the NEED_DV flag after a Read Capacity is issued (disks)
3424 * or a Mode Sense (cdroms).
3425 *
3426 * For tapes, initTarget will set this flag on completion of the Inquiry command.
3427 * Called only if the DV_NOT_DONE flag is set.
3428 */
3429static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
3430{
3431 u8 cmd;
3432 ScsiCfgData *pSpi;
3433
3434 ddvtprintk((" set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
3435 pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
3436
3437 if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
3438 return;
3439
3440 cmd = pReq->CDB[0];
3441
3442 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
3443 pSpi = &hd->ioc->spi_data;
3444 if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) {
3445 /* Set NEED_DV for all hidden disks
3446 */
3447 Ioc3PhysDisk_t *pPDisk = pSpi->pIocPg3->PhysDisk;
3448 int numPDisk = pSpi->pIocPg3->NumPhysDisks;
3449
3450 while (numPDisk) {
3451 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
3452 ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
3453 pPDisk++;
3454 numPDisk--;
3455 }
3456 }
3457 pSpi->dvStatus[pReq->TargetID] |= MPT_SCSICFG_NEED_DV;
3458 ddvtprintk(("NEED_DV set for visible disk id %d\n", pReq->TargetID));
3459 }
3460}
3461
3462/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3463/*
3464 * If no Target, bus reset on 1st I/O. Set the flag to
3465 * prevent any future negotiations to this device.
3466 */
3467static void mptscsih_no_negotiate(MPT_SCSI_HOST *hd, int target_id)
3468{
3469
3470 if ((hd->Targets) && (hd->Targets[target_id] == NULL))
3471 hd->ioc->spi_data.dvStatus[target_id] |= MPT_SCSICFG_BLK_NEGO;
3472
3473 return;
3474}
3475
3476/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3477/*
3478 * SCSI Config Page functionality ...
3479 */
3480/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3481/* mptscsih_setDevicePage1Flags - set the Requested and Configuration field flags
3482 * based on width, factor and offset parameters.
3483 * @width: bus width
3484 * @factor: sync factor
3485 * @offset: sync offset
3486 * @requestedPtr: pointer to requested values (updated)
3487 * @configurationPtr: pointer to configuration values (updated)
3488 * @flags: flags to block WDTR or SDTR negotiation
3489 *
3490 * Return: None.
3491 *
3492 * Remark: Called by writeSDP1 and _dv_params
3493 */
3494static void
3495mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags)
3496{
3497 u8 nowide = flags & MPT_TARGET_NO_NEGO_WIDE;
3498 u8 nosync = flags & MPT_TARGET_NO_NEGO_SYNC;
3499
3500 *configurationPtr = 0;
3501 *requestedPtr = width ? MPI_SCSIDEVPAGE1_RP_WIDE : 0;
3502 *requestedPtr |= (offset << 16) | (factor << 8);
3503
3504 if (width && offset && !nowide && !nosync) {
3505 if (factor < MPT_ULTRA160) {
3506 *requestedPtr |= (MPI_SCSIDEVPAGE1_RP_IU + MPI_SCSIDEVPAGE1_RP_DT);
3507 if ((flags & MPT_TARGET_NO_NEGO_QAS) == 0)
3508 *requestedPtr |= MPI_SCSIDEVPAGE1_RP_QAS;
3509 if (flags & MPT_TAPE_NEGO_IDP)
3510 *requestedPtr |= 0x08000000;
3511 } else if (factor < MPT_ULTRA2) {
3512 *requestedPtr |= MPI_SCSIDEVPAGE1_RP_DT;
3513 }
3514 }
3515
3516 if (nowide)
3517 *configurationPtr |= MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED;
3518
3519 if (nosync)
3520 *configurationPtr |= MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED;
3521
3522 return;
3523}
3524
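/*
 * Illustrative example (not part of the original source): for a wide device
 * with a nonzero sync offset, a sync factor below MPT_ULTRA160, and no
 * negotiation restrictions, the routine above produces
 *
 *	requested     = MPI_SCSIDEVPAGE1_RP_WIDE | (offset << 16) |
 *	                (factor << 8) | MPI_SCSIDEVPAGE1_RP_IU |
 *	                MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS;
 *	configuration = 0;
 *
 * while a target flagged MPT_TARGET_NO_NEGO_WIDE instead gets
 * MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED set in the configuration word.
 */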
3525/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3526/* mptscsih_writeSDP1 - write SCSI Device Page 1
3527 * @hd: Pointer to a SCSI Host Structure
3528 * @portnum: IOC port number
3529 * @target_id: writeSDP1 for single ID
3530 * @flags: MPT_SCSICFG_ALL_IDS, MPT_SCSICFG_USE_NVRAM, MPT_SCSICFG_BLK_NEGO
3531 *
3532 * Return: -EFAULT if read of config page header fails
3533 * or 0 if success.
3534 *
3535 * Remark: If a target has been found, the settings from the
3536 * target structure are used, else the device is set
3537 * to async/narrow.
3538 *
3539 * Remark: Called during init and after a FW reload.
3540 * Remark: We do not wait for a return, write pages sequentially.
3541 */
3542static int
3543mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3544{
3545 MPT_ADAPTER *ioc = hd->ioc;
3546 Config_t *pReq;
3547 SCSIDevicePage1_t *pData;
3548 VirtDevice *pTarget;
3549 MPT_FRAME_HDR *mf;
3550 dma_addr_t dataDma;
3551 u16 req_idx;
3552 u32 frameOffset;
3553 u32 requested, configuration, flagsLength;
3554 int ii, nvram;
3555 int id = 0, maxid = 0;
3556 u8 width;
3557 u8 factor;
3558 u8 offset;
3559 u8 bus = 0;
3560 u8 negoFlags;
3561 u8 maxwidth, maxoffset, maxfactor;
3562
3563 if (ioc->spi_data.sdp1length == 0)
3564 return 0;
3565
3566 if (flags & MPT_SCSICFG_ALL_IDS) {
3567 id = 0;
3568 maxid = ioc->sh->max_id - 1;
3569 } else if (ioc->sh) {
3570 id = target_id;
3571 maxid = min_t(int, id, ioc->sh->max_id - 1);
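		/* Single-ID case: clamp maxid so the loop below visits
		 * target_id exactly once (or not at all if target_id is
		 * out of range).
		 */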
3572 }
3573
3574 for (; id <= maxid; id++) {
3575
3576 if (id == ioc->pfacts[portnum].PortSCSIID)
3577 continue;
3578
3579 /* Use NVRAM to get adapter and target maximums
3580		 * Data is overridden by target structure information, if present
3581 */
3582 maxwidth = ioc->spi_data.maxBusWidth;
3583 maxoffset = ioc->spi_data.maxSyncOffset;
3584 maxfactor = ioc->spi_data.minSyncFactor;
3585 if (ioc->spi_data.nvram && (ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
3586 nvram = ioc->spi_data.nvram[id];
3587
3588 if (maxwidth)
3589 maxwidth = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
3590
3591 if (maxoffset > 0) {
3592 maxfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8;
3593 if (maxfactor == 0) {
3594 /* Key for async */
3595 maxfactor = MPT_ASYNC;
3596 maxoffset = 0;
3597 } else if (maxfactor < ioc->spi_data.minSyncFactor) {
3598 maxfactor = ioc->spi_data.minSyncFactor;
3599 }
3600 } else
3601 maxfactor = MPT_ASYNC;
3602 }
3603
3604 /* Set the negotiation flags.
3605 */
3606 negoFlags = ioc->spi_data.noQas;
3607 if (!maxwidth)
3608 negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
3609
3610 if (!maxoffset)
3611 negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
3612
3613 if (flags & MPT_SCSICFG_USE_NVRAM) {
3614 width = maxwidth;
3615 factor = maxfactor;
3616 offset = maxoffset;
3617 } else {
3618 width = 0;
3619 factor = MPT_ASYNC;
3620 offset = 0;
3621 //negoFlags = 0;
3622 //negoFlags = MPT_TARGET_NO_NEGO_SYNC;
3623 }
3624
3625 /* If id is not a raid volume, get the updated
3626 * transmission settings from the target structure.
3627 */
3628 if (hd->Targets && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) {
3629 width = pTarget->maxWidth;
3630 factor = pTarget->minSyncFactor;
3631 offset = pTarget->maxOffset;
3632 negoFlags = pTarget->negoFlags;
3633 }
3634
3635#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
3636 /* Force to async and narrow if DV has not been executed
3637 * for this ID
3638 */
3639 if ((hd->ioc->spi_data.dvStatus[id] & MPT_SCSICFG_DV_NOT_DONE) != 0) {
3640 width = 0;
3641 factor = MPT_ASYNC;
3642 offset = 0;
3643 }
3644#endif
3645
3646 if (flags & MPT_SCSICFG_BLK_NEGO)
3647 negoFlags = MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
3648
3649 mptscsih_setDevicePage1Flags(width, factor, offset,
3650 &requested, &configuration, negoFlags);
3651 dnegoprintk(("writeSDP1: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
3652 target_id, width, factor, offset, negoFlags, requested, configuration));
3653
3654 /* Get a MF for this command.
3655 */
3656 if ((mf = mpt_get_msg_frame(ScsiDoneCtx, ioc)) == NULL) {
3657 dprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n",
3658 ioc->name));
3659 return -EAGAIN;
3660 }
3661
3662 ddvprintk((MYIOC_s_INFO_FMT "WriteSDP1 (mf=%p, id=%d, req=0x%x, cfg=0x%x)\n",
3663 hd->ioc->name, mf, id, requested, configuration));
3664
3665
3666 /* Set the request and the data pointers.
3667 * Request takes: 36 bytes (32 bit SGE)
3668 * SCSI Device Page 1 requires 16 bytes
3669 * 40 + 16 <= size of SCSI IO Request = 56 bytes
3670 * and MF size >= 64 bytes.
3671 * Place data at end of MF.
3672 */
3673 pReq = (Config_t *)mf;
3674
3675 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
3676 frameOffset = ioc->req_sz - sizeof(SCSIDevicePage1_t);
3677
3678 pData = (SCSIDevicePage1_t *)((u8 *) mf + frameOffset);
3679 dataDma = ioc->req_frames_dma + (req_idx * ioc->req_sz) + frameOffset;
3680
3681 /* Complete the request frame (same for all requests).
3682 */
3683 pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
3684 pReq->Reserved = 0;
3685 pReq->ChainOffset = 0;
3686 pReq->Function = MPI_FUNCTION_CONFIG;
3687 pReq->ExtPageLength = 0;
3688 pReq->ExtPageType = 0;
3689 pReq->MsgFlags = 0;
3690 for (ii=0; ii < 8; ii++) {
3691 pReq->Reserved2[ii] = 0;
3692 }
3693 pReq->Header.PageVersion = ioc->spi_data.sdp1version;
3694 pReq->Header.PageLength = ioc->spi_data.sdp1length;
3695 pReq->Header.PageNumber = 1;
3696 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
3697 pReq->PageAddress = cpu_to_le32(id | (bus << 8 ));
3698
3699 /* Add a SGE to the config request.
3700 */
3701 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | ioc->spi_data.sdp1length * 4;
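		/* Note: sdp1length is in 32-bit words (MPI page lengths are
		 * word counts); the SGE length field takes bytes, hence * 4.
		 */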
3702
3703 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
3704
3705 /* Set up the common data portion
3706 */
3707 pData->Header.PageVersion = pReq->Header.PageVersion;
3708 pData->Header.PageLength = pReq->Header.PageLength;
3709 pData->Header.PageNumber = pReq->Header.PageNumber;
3710 pData->Header.PageType = pReq->Header.PageType;
3711 pData->RequestedParameters = cpu_to_le32(requested);
3712 pData->Reserved = 0;
3713 pData->Configuration = cpu_to_le32(configuration);
3714
3715 dprintk((MYIOC_s_INFO_FMT
3716 "write SDP1: id %d pgaddr 0x%x req 0x%x config 0x%x\n",
3717 ioc->name, id, (id | (bus<<8)),
3718 requested, configuration));
3719
3720 mpt_put_msg_frame(ScsiDoneCtx, ioc, mf);
3721 }
3722
3723 return 0;
3724}
3725
3726/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3727/* mptscsih_writeIOCPage4 - write IOC Page 4
3728 * @hd: Pointer to a SCSI Host Structure
3729 * @target_id: write IOC Page4 for this ID & Bus
3730 *
3731 * Return: -EAGAIN if unable to obtain a Message Frame
3732 * or 0 if success.
3733 *
3734 * Remark: We do not wait for a return, write pages sequentially.
3735 */
3736static int
3737mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus)
3738{
3739 MPT_ADAPTER *ioc = hd->ioc;
3740 Config_t *pReq;
3741 IOCPage4_t *IOCPage4Ptr;
3742 MPT_FRAME_HDR *mf;
3743 dma_addr_t dataDma;
3744 u16 req_idx;
3745 u32 frameOffset;
3746 u32 flagsLength;
3747 int ii;
3748
3749 /* Get a MF for this command.
3750 */
3751 if ((mf = mpt_get_msg_frame(ScsiDoneCtx, ioc)) == NULL) {
3752 dprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n",
3753 ioc->name));
3754 return -EAGAIN;
3755 }
3756
3757 /* Set the request and the data pointers.
3758 * Place data at end of MF.
3759 */
3760 pReq = (Config_t *)mf;
3761
3762 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
3763 frameOffset = ioc->req_sz - sizeof(IOCPage4_t);
3764
3765 /* Complete the request frame (same for all requests).
3766 */
3767 pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
3768 pReq->Reserved = 0;
3769 pReq->ChainOffset = 0;
3770 pReq->Function = MPI_FUNCTION_CONFIG;
3771 pReq->ExtPageLength = 0;
3772 pReq->ExtPageType = 0;
3773 pReq->MsgFlags = 0;
3774 for (ii=0; ii < 8; ii++) {
3775 pReq->Reserved2[ii] = 0;
3776 }
3777
3778 IOCPage4Ptr = ioc->spi_data.pIocPg4;
3779 dataDma = ioc->spi_data.IocPg4_dma;
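	/* Append a new SEP (SCSI Enclosure Processor) entry: ii takes the
	 * old ActiveSEP value, which indexes the next free slot in the
	 * SEP[] array.
	 */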
3780 ii = IOCPage4Ptr->ActiveSEP++;
3781 IOCPage4Ptr->SEP[ii].SEPTargetID = target_id;
3782 IOCPage4Ptr->SEP[ii].SEPBus = bus;
3783 pReq->Header = IOCPage4Ptr->Header;
3784 pReq->PageAddress = cpu_to_le32(target_id | (bus << 8 ));
3785
3786 /* Add a SGE to the config request.
3787 */
3788 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
3789 (IOCPage4Ptr->Header.PageLength + ii) * 4;
3790
3791 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
3792
3793 dinitprintk((MYIOC_s_INFO_FMT
3794 "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
3795 ioc->name, IOCPage4Ptr->MaxSEP, IOCPage4Ptr->ActiveSEP, target_id, bus));
3796
3797 mpt_put_msg_frame(ScsiDoneCtx, ioc, mf);
3798
3799 return 0;
3800}
3801
3802/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3803/*
3804 * Bus Scan and Domain Validation functionality ...
3805 */
3806
3807/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3808/*
3809 * mptscsih_scandv_complete - Scan and DV callback routine registered
3810 *	to the Fusion MPT (base) driver.
3811 *
3812 * @ioc: Pointer to MPT_ADAPTER structure
3813 * @mf: Pointer to original MPT request frame
3814 * @mr: Pointer to MPT reply frame (NULL if TurboReply)
3815 *
3816 * This routine is called from mpt.c::mpt_interrupt() at the completion
3817 * of any SCSI IO request.
3818 * This routine is registered with the Fusion MPT (base) driver at driver
3819 * load/init time via the mpt_register() API call.
3820 *
3821 * Returns 1 indicating alloc'd request frame ptr should be freed.
3822 *
3823 * Remark: Sets a completion code and (possibly) saves sense data
3824 * in the IOC member localReply structure.
3825 * Used ONLY for DV and other internal commands.
3826 */
3827static int
3828mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
3829{
3830 MPT_SCSI_HOST *hd;
3831 SCSIIORequest_t *pReq;
3832 int completionCode;
3833 u16 req_idx;
3834
3835 if ((mf == NULL) ||
3836 (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
3837 printk(MYIOC_s_ERR_FMT
3838 "ScanDvComplete, %s req frame ptr! (=%p)\n",
3839 ioc->name, mf?"BAD":"NULL", (void *) mf);
3840 goto wakeup;
3841 }
3842
3843 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
3844 del_timer(&hd->timer);
3845 req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
3846 hd->ScsiLookup[req_idx] = NULL;
3847 pReq = (SCSIIORequest_t *) mf;
3848
3849 if (mf != hd->cmdPtr) {
3850 printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n",
3851 hd->ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx);
3852 }
3853 hd->cmdPtr = NULL;
3854
3855 ddvprintk((MYIOC_s_INFO_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n",
3856 hd->ioc->name, mf, mr, req_idx));
3857
3858 hd->pLocal = &hd->localReply;
3859 hd->pLocal->scsiStatus = 0;
3860
3861 /* If target struct exists, clear sense valid flag.
3862 */
3863 if (mr == NULL) {
3864 completionCode = MPT_SCANDV_GOOD;
3865 } else {
3866 SCSIIOReply_t *pReply;
3867 u16 status;
3868 u8 scsi_status;
3869
3870 pReply = (SCSIIOReply_t *) mr;
3871
3872 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
3873 scsi_status = pReply->SCSIStatus;
3874
3875 ddvtprintk((KERN_NOTICE " IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh, IOCLogInfo=%08xh\n",
3876 status, pReply->SCSIState, scsi_status,
3877 le32_to_cpu(pReply->IOCLogInfo)));
3878
3879 switch(status) {
3880
3881 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
3882 completionCode = MPT_SCANDV_SELECTION_TIMEOUT;
3883 break;
3884
3885 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
3886 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
3887 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
3888 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
3889 completionCode = MPT_SCANDV_DID_RESET;
3890 break;
3891
3892 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
3893 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
3894 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
3895 if (pReply->Function == MPI_FUNCTION_CONFIG) {
3896 ConfigReply_t *pr = (ConfigReply_t *)mr;
3897 completionCode = MPT_SCANDV_GOOD;
3898 hd->pLocal->header.PageVersion = pr->Header.PageVersion;
3899 hd->pLocal->header.PageLength = pr->Header.PageLength;
3900 hd->pLocal->header.PageNumber = pr->Header.PageNumber;
3901 hd->pLocal->header.PageType = pr->Header.PageType;
3902
3903 } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
3904 /* If the RAID Volume request is successful,
3905 * return GOOD, else indicate that
3906 * some type of error occurred.
3907 */
3908 MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr;
3909 if (pr->ActionStatus == MPI_RAID_ACTION_ASTATUS_SUCCESS)
3910 completionCode = MPT_SCANDV_GOOD;
3911 else
3912 completionCode = MPT_SCANDV_SOME_ERROR;
3913
3914 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
3915 u8 *sense_data;
3916 int sz;
3917
3918 /* save sense data in global structure
3919 */
3920 completionCode = MPT_SCANDV_SENSE;
3921 hd->pLocal->scsiStatus = scsi_status;
3922 sense_data = ((u8 *)hd->ioc->sense_buf_pool +
3923 (req_idx * MPT_SENSE_BUFFER_ALLOC));
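				/* Each request index owns a fixed slot of
				 * MPT_SENSE_BUFFER_ALLOC bytes in the shared
				 * sense buffer pool.
				 */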
3924
3925 sz = min_t(int, pReq->SenseBufferLength,
3926 SCSI_STD_SENSE_BYTES);
3927 memcpy(hd->pLocal->sense, sense_data, sz);
3928
3929 ddvprintk((KERN_NOTICE " Check Condition, sense ptr %p\n",
3930 sense_data));
3931 } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
3932 if (pReq->CDB[0] == INQUIRY)
3933 completionCode = MPT_SCANDV_ISSUE_SENSE;
3934 else
3935 completionCode = MPT_SCANDV_DID_RESET;
3936 }
3937 else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
3938 completionCode = MPT_SCANDV_DID_RESET;
3939 else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
3940 completionCode = MPT_SCANDV_DID_RESET;
3941 else {
3942 completionCode = MPT_SCANDV_GOOD;
3943 hd->pLocal->scsiStatus = scsi_status;
3944 }
3945 break;
3946
3947 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
3948 if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
3949 completionCode = MPT_SCANDV_DID_RESET;
3950 else
3951 completionCode = MPT_SCANDV_SOME_ERROR;
3952 break;
3953
3954 default:
3955 completionCode = MPT_SCANDV_SOME_ERROR;
3956 break;
3957
3958 } /* switch(status) */
3959
3960 ddvtprintk((KERN_NOTICE " completionCode set to %08xh\n",
3961 completionCode));
3962 } /* end of address reply case */
3963
3964 hd->pLocal->completion = completionCode;
3965
3966 /* MF and RF are freed in mpt_interrupt
3967 */
3968wakeup:
3969 /* Free Chain buffers (will never chain) in scan or dv */
3970 //mptscsih_freeChainBuffers(ioc, req_idx);
3971
3972 /*
3973 * Wake up the original calling thread
3974 */
3975 scandv_wait_done = 1;
3976 wake_up(&scandv_waitq);
3977
3978 return 1;
3979}
3980
3981/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3982/* mptscsih_timer_expired - Callback for the timer process.
3983 * Used only for DV functionality.
3984 * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
3985 *
3986 */
3987static void mptscsih_timer_expired(unsigned long data)
3988{
3989 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data;
3990
3991 ddvprintk((MYIOC_s_WARN_FMT "Timer Expired! Cmd %p\n", hd->ioc->name, hd->cmdPtr));
3992
3993 if (hd->cmdPtr) {
3994 MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr;
3995
3996 if (cmd->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
3997			/* We would like to issue a task management request here,
3998			 * but TM requests MUST be single threaded.
3999			 * With the old EH code, issue the request if no TM is currently active.
4000			 * With the new EH code, do nothing; wait for the OS cmd timeout
4001			 * to trigger a bus reset.
4002			 */
4003 ddvtprintk((MYIOC_s_NOTE_FMT "DV Cmd Timeout: NoOp\n", hd->ioc->name));
4004 } else {
4005 /* Perform a FW reload */
4006 if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) {
4007 printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", hd->ioc->name);
4008 }
4009 }
4010 } else {
4011 /* This should NEVER happen */
4012 printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", hd->ioc->name);
4013 }
4014
4015 /* No more processing.
4016 * TM call will generate an interrupt for SCSI TM Management.
4017 * The FW will reply to all outstanding commands, callback will finish cleanup.
4018 * Hard reset clean-up will free all resources.
4019 */
4020 ddvprintk((MYIOC_s_WARN_FMT "Timer Expired Complete!\n", hd->ioc->name));
4021
4022 return;
4023}
4024
4025#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
4026/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4027/* mptscsih_do_raid - Format and issue a RAID volume request message.
4028 * @hd: Pointer to SCSI host structure
4029 * @action: RAID action to be performed.
4030 * @io: INTERNAL_CMD pointer supplying the volume/physical disk
4031 *	id, bus and physDiskNum.
4032 *
4033 * Returns: < 0 on a fatal error
4034 *	0 on success
4035 *
4036 * Remark: Does not return until the reply has been processed by the ISR.
4037 */
4038static int
4039mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io)
4040{
4041 MpiRaidActionRequest_t *pReq;
4042 MPT_FRAME_HDR *mf;
4043 int in_isr;
4044
4045 in_isr = in_interrupt();
4046 if (in_isr) {
4047 dprintk((MYIOC_s_WARN_FMT "Internal raid request not allowed in ISR context!\n",
4048 hd->ioc->name));
4049 return -EPERM;
4050 }
4051
4052 /* Get and Populate a free Frame
4053 */
4054 if ((mf = mpt_get_msg_frame(ScsiScanDvCtx, hd->ioc)) == NULL) {
4055 ddvprintk((MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
4056 hd->ioc->name));
4057 return -EAGAIN;
4058 }
4059 pReq = (MpiRaidActionRequest_t *)mf;
4060 pReq->Action = action;
4061 pReq->Reserved1 = 0;
4062 pReq->ChainOffset = 0;
4063 pReq->Function = MPI_FUNCTION_RAID_ACTION;
4064 pReq->VolumeID = io->id;
4065 pReq->VolumeBus = io->bus;
4066 pReq->PhysDiskNum = io->physDiskNum;
4067 pReq->MsgFlags = 0;
4068 pReq->Reserved2 = 0;
4069 pReq->ActionDataWord = 0; /* Reserved for this action */
4070 //pReq->ActionDataSGE = 0;
4071
4072 mpt_add_sge((char *)&pReq->ActionDataSGE,
4073 MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
4074
4075 ddvprintk((MYIOC_s_INFO_FMT "RAID Volume action %x id %d\n",
4076 hd->ioc->name, action, io->id));
4077
4078 hd->pLocal = NULL;
4079 hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
4080 scandv_wait_done = 0;
4081
4082 /* Save cmd pointer, for resource free if timeout or
4083 * FW reload occurs
4084 */
4085 hd->cmdPtr = mf;
4086
4087 add_timer(&hd->timer);
4088 mpt_put_msg_frame(ScsiScanDvCtx, hd->ioc, mf);
4089 wait_event(scandv_waitq, scandv_wait_done);
4090
4091 if ((hd->pLocal == NULL) || (hd->pLocal->completion != MPT_SCANDV_GOOD))
4092 return -1;
4093
4094 return 0;
4095}
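/*
 * Typical use (see mptscsih_doDv below; not part of the original source):
 * quiesce physical I/O to a hidden RAID member before running DV on it:
 *
 *	if (mptscsih_do_raid(hd, MPI_RAID_ACTION_QUIESCE_PHYS_IO, &iocmd) < 0)
 *		goto target_done;
 */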
4096#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */
4097
4098/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4099/**
4100 * mptscsih_do_cmd - Do internal command.
4101 * @hd: MPT_SCSI_HOST pointer
4102 * @io: INTERNAL_CMD pointer.
4103 *
4104 * Issue the specified internally generated command and do command
4105 * specific cleanup. For bus scan / DV only.
4106 * NOTES: If the command is Inquiry and the status is good,
4107 * initialize a target structure and save the data.
4108 *
4109 * Remark: Single threaded access only.
4110 *
4111 * Return:
4112 * < 0 if an illegal command or no resources
4113 *
4114 * 0 if good
4115 *
4116 * > 0 if the command completed but with some type of completion error.
4117 */
4118static int
4119mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
4120{
4121 MPT_FRAME_HDR *mf;
4122 SCSIIORequest_t *pScsiReq;
4123 SCSIIORequest_t ReqCopy;
4124 int my_idx, ii, dir;
4125 int rc, cmdTimeout;
4126 int in_isr;
4127 char cmdLen;
4128 char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
4129 char cmd = io->cmd;
4130
4131 in_isr = in_interrupt();
4132 if (in_isr) {
4133 dprintk((MYIOC_s_WARN_FMT "Internal SCSI IO request not allowed in ISR context!\n",
4134 hd->ioc->name));
4135 return -EPERM;
4136 }
4137
4138
4139 /* Set command specific information
4140 */
4141 switch (cmd) {
4142 case INQUIRY:
4143 cmdLen = 6;
4144 dir = MPI_SCSIIO_CONTROL_READ;
4145 CDB[0] = cmd;
4146 CDB[4] = io->size;
4147 cmdTimeout = 10;
4148 break;
4149
4150 case TEST_UNIT_READY:
4151 cmdLen = 6;
4152 dir = MPI_SCSIIO_CONTROL_READ;
4153 cmdTimeout = 10;
4154 break;
4155
4156 case START_STOP:
4157 cmdLen = 6;
4158 dir = MPI_SCSIIO_CONTROL_READ;
4159 CDB[0] = cmd;
4160 CDB[4] = 1; /*Spin up the disk */
4161 cmdTimeout = 15;
4162 break;
4163
4164 case REQUEST_SENSE:
4165 cmdLen = 6;
4166 CDB[0] = cmd;
4167 CDB[4] = io->size;
4168 dir = MPI_SCSIIO_CONTROL_READ;
4169 cmdTimeout = 10;
4170 break;
4171
4172 case READ_BUFFER:
4173 cmdLen = 10;
4174 dir = MPI_SCSIIO_CONTROL_READ;
4175 CDB[0] = cmd;
4176 if (io->flags & MPT_ICFLAG_ECHO) {
4177 CDB[1] = 0x0A;
4178 } else {
4179 CDB[1] = 0x02;
4180 }
4181
4182 if (io->flags & MPT_ICFLAG_BUF_CAP) {
4183 CDB[1] |= 0x01;
4184 }
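		/* Resulting READ BUFFER mode in CDB[1]:
		 *   0x0A = echo buffer, 0x0B = echo buffer descriptor,
		 *   0x02 = data,        0x03 = data buffer descriptor.
		 */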
4185 CDB[6] = (io->size >> 16) & 0xFF;
4186 CDB[7] = (io->size >> 8) & 0xFF;
4187 CDB[8] = io->size & 0xFF;
4188 cmdTimeout = 10;
4189 break;
4190
4191 case WRITE_BUFFER:
4192 cmdLen = 10;
4193 dir = MPI_SCSIIO_CONTROL_WRITE;
4194 CDB[0] = cmd;
4195 if (io->flags & MPT_ICFLAG_ECHO) {
4196 CDB[1] = 0x0A;
4197 } else {
4198 CDB[1] = 0x02;
4199 }
4200 CDB[6] = (io->size >> 16) & 0xFF;
4201 CDB[7] = (io->size >> 8) & 0xFF;
4202 CDB[8] = io->size & 0xFF;
4203 cmdTimeout = 10;
4204 break;
4205
4206 case RESERVE:
4207 cmdLen = 6;
4208 dir = MPI_SCSIIO_CONTROL_READ;
4209 CDB[0] = cmd;
4210 cmdTimeout = 10;
4211 break;
4212
4213 case RELEASE:
4214 cmdLen = 6;
4215 dir = MPI_SCSIIO_CONTROL_READ;
4216 CDB[0] = cmd;
4217 cmdTimeout = 10;
4218 break;
4219
4220 case SYNCHRONIZE_CACHE:
4221 cmdLen = 10;
4222 dir = MPI_SCSIIO_CONTROL_READ;
4223 CDB[0] = cmd;
4224// CDB[1] = 0x02; /* set immediate bit */
4225 cmdTimeout = 10;
4226 break;
4227
4228 default:
4229 /* Error Case */
4230 return -EFAULT;
4231 }
4232
4233 /* Get and Populate a free Frame
4234 */
4235 if ((mf = mpt_get_msg_frame(ScsiScanDvCtx, hd->ioc)) == NULL) {
4236 ddvprintk((MYIOC_s_WARN_FMT "No msg frames!\n",
4237 hd->ioc->name));
4238 return -EBUSY;
4239 }
4240
4241 pScsiReq = (SCSIIORequest_t *) mf;
4242
4243 /* Get the request index */
4244 my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
4245 ADD_INDEX_LOG(my_idx); /* for debug */
4246
4247 if (io->flags & MPT_ICFLAG_PHYS_DISK) {
4248 pScsiReq->TargetID = io->physDiskNum;
4249 pScsiReq->Bus = 0;
4250 pScsiReq->ChainOffset = 0;
4251 pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4252 } else {
4253 pScsiReq->TargetID = io->id;
4254 pScsiReq->Bus = io->bus;
4255 pScsiReq->ChainOffset = 0;
4256 pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
4257 }
4258
4259 pScsiReq->CDBLength = cmdLen;
4260 pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
4261
4262 pScsiReq->Reserved = 0;
4263
4264 pScsiReq->MsgFlags = mpt_msg_flags();
4265	/* MsgContext set in mpt_get_msg_frame call */
4266
4267 for (ii=0; ii < 8; ii++)
4268 pScsiReq->LUN[ii] = 0;
4269 pScsiReq->LUN[1] = io->lun;
4270
4271 if (io->flags & MPT_ICFLAG_TAGGED_CMD)
4272 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_SIMPLEQ);
4273 else
4274 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
4275
4276 if (cmd == REQUEST_SENSE) {
4277 pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
4278 ddvprintk((MYIOC_s_INFO_FMT "Untagged! 0x%2x\n",
4279 hd->ioc->name, cmd));
4280 }
4281
4282 for (ii=0; ii < 16; ii++)
4283 pScsiReq->CDB[ii] = CDB[ii];
4284
4285 pScsiReq->DataLength = cpu_to_le32(io->size);
4286 pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma
4287 + (my_idx * MPT_SENSE_BUFFER_ALLOC));
4288
4289 ddvprintk((MYIOC_s_INFO_FMT "Sending Command 0x%x for (%d:%d:%d)\n",
4290 hd->ioc->name, cmd, io->bus, io->id, io->lun));
4291
4292 if (dir == MPI_SCSIIO_CONTROL_READ) {
4293 mpt_add_sge((char *) &pScsiReq->SGL,
4294 MPT_SGE_FLAGS_SSIMPLE_READ | io->size,
4295 io->data_dma);
4296 } else {
4297 mpt_add_sge((char *) &pScsiReq->SGL,
4298 MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
4299 io->data_dma);
4300 }
4301
4302 /* The ISR will free the request frame, but we need
4303 * the information to initialize the target. Duplicate.
4304 */
4305 memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
4306
4307 /* Issue this command after:
4308 * finish init
4309 * add timer
4310 * Wait until the reply has been received
4311 * ScsiScanDvCtx callback function will
4312 * set hd->pLocal;
4313 * set scandv_wait_done and call wake_up
4314 */
4315 hd->pLocal = NULL;
4316 hd->timer.expires = jiffies + HZ*cmdTimeout;
4317 scandv_wait_done = 0;
4318
4319 /* Save cmd pointer, for resource free if timeout or
4320 * FW reload occurs
4321 */
4322 hd->cmdPtr = mf;
4323
4324 add_timer(&hd->timer);
4325 mpt_put_msg_frame(ScsiScanDvCtx, hd->ioc, mf);
4326 wait_event(scandv_waitq, scandv_wait_done);
4327
4328 if (hd->pLocal) {
4329 rc = hd->pLocal->completion;
4330 hd->pLocal->skip = 0;
4331
4332 /* Always set fatal error codes in some cases.
4333 */
4334 if (rc == MPT_SCANDV_SELECTION_TIMEOUT)
4335 rc = -ENXIO;
4336 else if (rc == MPT_SCANDV_SOME_ERROR)
4337 rc = -rc;
4338 } else {
4339 rc = -EFAULT;
4340 /* This should never happen. */
4341 ddvprintk((MYIOC_s_INFO_FMT "_do_cmd: Null pLocal!!!\n",
4342 hd->ioc->name));
4343 }
4344
4345 return rc;
4346}
4347
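/*
 * Illustrative caller sketch (mirrors mptscsih_synchronize_cache below and
 * the TEST_UNIT_READY step in mptscsih_doDv; not part of the original
 * source): fill in an INTERNAL_CMD and hand it to mptscsih_do_cmd(), which
 * sleeps until the mptscsih_scandv_complete() callback wakes it.
 *
 *	INTERNAL_CMD iocmd;
 *
 *	iocmd.cmd = TEST_UNIT_READY;
 *	iocmd.bus = bus;
 *	iocmd.id = id;
 *	iocmd.lun = 0;
 *	iocmd.flags = 0;
 *	iocmd.physDiskNum = -1;
 *	iocmd.data = NULL;
 *	iocmd.data_dma = -1;
 *	iocmd.size = 0;
 *	iocmd.rsvd = iocmd.rsvd2 = 0;
 *	if (mptscsih_do_cmd(hd, &iocmd) < 0)
 *		return -1;
 */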
4348/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4349/**
4350 * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
4351 * @hd: Pointer to MPT_SCSI_HOST structure
4352 * @portnum: IOC port number
4353 *
4354 * Uses the ISR, but with special processing.
4355 * MUST be single-threaded.
4356 *
4357 * Return: 0 on completion
4358 */
4359static int
4360mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
4361{
4362 MPT_ADAPTER *ioc= hd->ioc;
4363 VirtDevice *pTarget;
4364 SCSIDevicePage1_t *pcfg1Data = NULL;
4365 INTERNAL_CMD iocmd;
4366 CONFIGPARMS cfg;
4367 dma_addr_t cfg1_dma_addr = -1;
4368 ConfigPageHeader_t header1;
4369 int bus = 0;
4370 int id = 0;
4371 int lun;
4372 int indexed_lun, lun_index;
4373 int hostId = ioc->pfacts[portnum].PortSCSIID;
4374 int max_id;
4375 int requested, configuration, data;
4376 int doConfig = 0;
4377 u8 flags, factor;
4378
4379 max_id = ioc->sh->max_id - 1;
4380
4381 /* Following parameters will not change
4382 * in this routine.
4383 */
4384 iocmd.cmd = SYNCHRONIZE_CACHE;
4385 iocmd.flags = 0;
4386 iocmd.physDiskNum = -1;
4387 iocmd.data = NULL;
4388 iocmd.data_dma = -1;
4389 iocmd.size = 0;
4390 iocmd.rsvd = iocmd.rsvd2 = 0;
4391
4392	/* No target structures allocated; nothing to flush.
4393	 */
4394 if (hd->Targets == NULL)
4395 return 0;
4396
4397 /* Skip the host
4398 */
4399 if (id == hostId)
4400 id++;
4401
4402 /* Write SDP1 for all SCSI devices
4403 * Alloc memory and set up config buffer
4404 */
4405 if (ioc->bus_type == SCSI) {
4406 if (ioc->spi_data.sdp1length > 0) {
4407 pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev,
4408 ioc->spi_data.sdp1length * 4, &cfg1_dma_addr);
4409
4410 if (pcfg1Data != NULL) {
4411 doConfig = 1;
4412 header1.PageVersion = ioc->spi_data.sdp1version;
4413 header1.PageLength = ioc->spi_data.sdp1length;
4414 header1.PageNumber = 1;
4415 header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
4416 cfg.hdr = &header1;
4417 cfg.physAddr = cfg1_dma_addr;
4418 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
4419 cfg.dir = 1;
4420 cfg.timeout = 0;
4421 }
4422 }
4423 }
4424
4425 /* loop through all devices on this port
4426 */
4427 while (bus < MPT_MAX_BUS) {
4428 iocmd.bus = bus;
4429 iocmd.id = id;
4430 pTarget = hd->Targets[(int)id];
4431
4432 if (doConfig) {
4433
4434 /* Set the negotiation flags */
4435 if (pTarget && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) {
4436 flags = pTarget->negoFlags;
4437 } else {
4438 flags = hd->ioc->spi_data.noQas;
4439 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
4440 data = hd->ioc->spi_data.nvram[id];
4441
4442 if (data & MPT_NVRAM_WIDE_DISABLE)
4443 flags |= MPT_TARGET_NO_NEGO_WIDE;
4444
4445 factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
4446 if ((factor == 0) || (factor == MPT_ASYNC))
4447 flags |= MPT_TARGET_NO_NEGO_SYNC;
4448 }
4449 }
4450
4451 /* Force to async, narrow */
4452 mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
4453 &configuration, flags);
4454			dnegoprintk(("synchronize cache: id=%d width=0 factor=MPT_ASYNC "
4455 "offset=0 negoFlags=%x request=%x config=%x\n",
4456 id, flags, requested, configuration));
4457			pcfg1Data->RequestedParameters = cpu_to_le32(requested);
4458			pcfg1Data->Reserved = 0;
4459			pcfg1Data->Configuration = cpu_to_le32(configuration);
4460 cfg.pageAddr = (bus<<8) | id;
4461 mpt_config(hd->ioc, &cfg);
4462 }
4463
4464 /* If target Ptr NULL or if this target is NOT a disk, skip.
4465 */
4466 if ((pTarget) && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)){
4467 for (lun=0; lun <= MPT_LAST_LUN; lun++) {
4468 /* If LUN present, issue the command
4469 */
4470 lun_index = (lun >> 5); /* 32 luns per lun_index */
4471 indexed_lun = (lun % 32);
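					/* Example: lun 37 maps to luns[1],
					 * bit 5 (37 >> 5 == 1, 37 % 32 == 5).
					 */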
4472 if (pTarget->luns[lun_index] & (1<<indexed_lun)) {
4473 iocmd.lun = lun;
4474 (void) mptscsih_do_cmd(hd, &iocmd);
4475 }
4476 }
4477 }
4478
4479 /* get next relevant device */
4480 id++;
4481
4482 if (id == hostId)
4483 id++;
4484
4485 if (id > max_id) {
4486 id = 0;
4487 bus++;
4488 }
4489 }
4490
4491 if (pcfg1Data) {
4492 pci_free_consistent(ioc->pcidev, header1.PageLength * 4, pcfg1Data, cfg1_dma_addr);
4493 }
4494
4495 return 0;
4496}
4497
4498#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
4499/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4500/**
4501 * mptscsih_domainValidation - Top level handler for domain validation.
4502 * @hd: Pointer to MPT_SCSI_HOST structure.
4503 *
4504 * Uses the ISR, but with special processing.
4505 * Called from a scheduled work task; must not run in interrupt context.
4506 * While the task is alive, do DV for all devices that need it.
4507 *
4508 * Return: None.
4509 */
4510static void
4511mptscsih_domainValidation(void *arg)
4512{
4513 MPT_SCSI_HOST *hd;
4514 MPT_ADAPTER *ioc;
4515 unsigned long flags;
4516 int id, maxid, dvStatus, did;
4517 int ii, isPhysDisk;
4518
4519 spin_lock_irqsave(&dvtaskQ_lock, flags);
4520 dvtaskQ_active = 1;
4521 if (dvtaskQ_release) {
4522 dvtaskQ_active = 0;
4523 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
4524 return;
4525 }
4526 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
4527
4528 /* For this ioc, loop through all devices and do dv to each device.
4529 * When complete with this ioc, search through the ioc list, and
4530 * for each scsi ioc found, do dv for all devices. Exit when no
4531 * device needs dv.
4532 */
4533 did = 1;
4534 while (did) {
4535 did = 0;
4536 list_for_each_entry(ioc, &ioc_list, list) {
4537 spin_lock_irqsave(&dvtaskQ_lock, flags);
4538 if (dvtaskQ_release) {
4539 dvtaskQ_active = 0;
4540 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
4541 return;
4542 }
4543 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
4544
4545 msleep(250);
4546
4547 /* DV only to SCSI adapters */
4548 if (ioc->bus_type != SCSI)
4549 continue;
4550
4551 /* Make sure everything looks ok */
4552 if (ioc->sh == NULL)
4553 continue;
4554
4555 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
4556 if (hd == NULL)
4557 continue;
4558
4559 if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) {
4560 mpt_read_ioc_pg_3(ioc);
4561 if (ioc->spi_data.pIocPg3) {
4562 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
4563 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
4564
4565 while (numPDisk) {
4566 if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE)
4567 ioc->spi_data.dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
4568
4569 pPDisk++;
4570 numPDisk--;
4571 }
4572 }
4573 ioc->spi_data.forceDv &= ~MPT_SCSICFG_RELOAD_IOC_PG3;
4574 }
4575
4576 maxid = min_t(int, ioc->sh->max_id, MPT_MAX_SCSI_DEVICES);
4577
4578 for (id = 0; id < maxid; id++) {
4579 spin_lock_irqsave(&dvtaskQ_lock, flags);
4580 if (dvtaskQ_release) {
4581 dvtaskQ_active = 0;
4582 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
4583 return;
4584 }
4585 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
4586 dvStatus = hd->ioc->spi_data.dvStatus[id];
4587
4588 if (dvStatus & MPT_SCSICFG_NEED_DV) {
4589 did++;
4590 hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_DV_PENDING;
4591 hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_NEED_DV;
4592
4593 msleep(250);
4594
4595 /* If hidden phys disk, block IO's to all
4596 * raid volumes
4597 * else, process normally
4598 */
4599 isPhysDisk = mptscsih_is_phys_disk(ioc, id);
4600 if (isPhysDisk) {
4601 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4602 if (hd->ioc->spi_data.isRaid & (1 << ii)) {
4603 hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING;
4604 }
4605 }
4606 }
4607
4608 if (mptscsih_doDv(hd, 0, id) == 1) {
4609 /* Untagged device was busy, try again
4610 */
4611 hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_NEED_DV;
4612 hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_PENDING;
4613 } else {
4614 /* DV is complete. Clear flags.
4615 */
4616 hd->ioc->spi_data.dvStatus[id] &= ~(MPT_SCSICFG_DV_NOT_DONE | MPT_SCSICFG_DV_PENDING);
4617 }
4618
4619 if (isPhysDisk) {
4620 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4621 if (hd->ioc->spi_data.isRaid & (1 << ii)) {
4622 hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING;
4623 }
4624 }
4625 }
4626
4627 if (hd->ioc->spi_data.noQas)
4628 mptscsih_qas_check(hd, id);
4629 }
4630 }
4631 }
4632 }
4633
4634 spin_lock_irqsave(&dvtaskQ_lock, flags);
4635 dvtaskQ_active = 0;
4636 spin_unlock_irqrestore(&dvtaskQ_lock, flags);
4637
4638 return;
4639}
4640
4641/* Search IOC page 3 to determine if this id is a hidden physical disk
4642 */
4643static int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
4644{
4645 if (ioc->spi_data.pIocPg3) {
4646 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
4647 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
4648
4649 while (numPDisk) {
4650 if (pPDisk->PhysDiskID == id) {
4651 return 1;
4652 }
4653 pPDisk++;
4654 numPDisk--;
4655 }
4656 }
4657 return 0;
4658}
4659
4660/* Write SDP1 to the other targets once QAS has been disabled (noQas set)
4661 */
4662static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id)
4663{
4664 VirtDevice *pTarget;
4665 int ii;
4666
4667 if (hd->Targets == NULL)
4668 return;
4669
4670 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4671 if (ii == id)
4672 continue;
4673
4674 if ((hd->ioc->spi_data.dvStatus[ii] & MPT_SCSICFG_DV_NOT_DONE) != 0)
4675 continue;
4676
4677 pTarget = hd->Targets[ii];
4678
4679 if ((pTarget != NULL) && (!pTarget->raidVolume)) {
4680 if ((pTarget->negoFlags & hd->ioc->spi_data.noQas) == 0) {
4681 pTarget->negoFlags |= hd->ioc->spi_data.noQas;
4682 dnegoprintk(("writeSDP1: id=%d flags=0\n", id));
4683 mptscsih_writeSDP1(hd, 0, ii, 0);
4684 }
4685 } else {
4686 if (mptscsih_is_phys_disk(hd->ioc, ii) == 1) {
4687 dnegoprintk(("writeSDP1: id=%d SCSICFG_USE_NVRAM\n", id));
4688 mptscsih_writeSDP1(hd, 0, ii, MPT_SCSICFG_USE_NVRAM);
4689 }
4690 }
4691 }
4692 return;
4693}
4694
4695
4696
4697#define MPT_GET_NVRAM_VALS 0x01
4698#define MPT_UPDATE_MAX 0x02
4699#define MPT_SET_MAX 0x04
4700#define MPT_SET_MIN 0x08
4701#define MPT_FALLBACK 0x10
4702#define MPT_SAVE 0x20
4703
4704/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4705/**
4706 *	mptscsih_doDv - Perform domain validation on a target.
4707 *	@hd: Pointer to MPT_SCSI_HOST structure.
4708 *	@bus_number: Bus number of this target.
4709 *	@id: Physical ID of this target.
4710 *
4711 *	Uses the ISR, but with special processing.
4712 *	MUST be single-threaded.
4713 *	Test will exit if the target is at async & narrow.
4714 *
4715 *	Return: 1 if an untagged device was busy (retry later), else 0.
4716 */
4717static int
4718mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4719{
4720 MPT_ADAPTER *ioc = hd->ioc;
4721 VirtDevice *pTarget;
4722 SCSIDevicePage1_t *pcfg1Data;
4723 SCSIDevicePage0_t *pcfg0Data;
4724 u8 *pbuf1;
4725 u8 *pbuf2;
4726 u8 *pDvBuf;
4727 dma_addr_t dvbuf_dma = -1;
4728 dma_addr_t buf1_dma = -1;
4729 dma_addr_t buf2_dma = -1;
4730 dma_addr_t cfg1_dma_addr = -1;
4731 dma_addr_t cfg0_dma_addr = -1;
4732 ConfigPageHeader_t header1;
4733 ConfigPageHeader_t header0;
4734 DVPARAMETERS dv;
4735 INTERNAL_CMD iocmd;
4736 CONFIGPARMS cfg;
4737 int dv_alloc = 0;
4738 int rc, sz = 0;
4739 int bufsize = 0;
4740 int dataBufSize = 0;
4741 int echoBufSize = 0;
4742 int notDone;
4743 int patt;
4744 int repeat;
4745 int retcode = 0;
4746 int nfactor = MPT_ULTRA320;
4747 char firstPass = 1;
4748 char doFallback = 0;
4749 char readPage0;
4750 char bus, lun;
4751 char inq0 = 0;
4752
4753 if (ioc->spi_data.sdp1length == 0)
4754 return 0;
4755
4756 if (ioc->spi_data.sdp0length == 0)
4757 return 0;
4758
4759 /* If multiple buses are used, require that the initiator
4760 * id be the same on all buses.
4761 */
4762 if (id == ioc->pfacts[0].PortSCSIID)
4763 return 0;
4764
4765 lun = 0;
4766 bus = (u8) bus_number;
4767 ddvtprintk((MYIOC_s_NOTE_FMT
4768 "DV started: bus=%d, id=%d dv @ %p\n",
4769 ioc->name, bus, id, &dv));
4770
4771 /* Prep DV structure
4772 */
4773 memset (&dv, 0, sizeof(DVPARAMETERS));
4774 dv.id = id;
4775
4776 /* Populate tmax with the current maximum
4777 * transfer parameters for this target.
4778 * Exit if narrow and async.
4779 */
4780 dv.cmd = MPT_GET_NVRAM_VALS;
4781 mptscsih_dv_parms(hd, &dv, NULL);
4782
4783 /* Prep SCSI IO structure
4784 */
4785 iocmd.id = id;
4786 iocmd.bus = bus;
4787 iocmd.lun = lun;
4788 iocmd.flags = 0;
4789 iocmd.physDiskNum = -1;
4790 iocmd.rsvd = iocmd.rsvd2 = 0;
4791
4792 pTarget = hd->Targets[id];
4793
4794 /* Use tagged commands if possible.
4795 */
4796 if (pTarget) {
4797 if (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
4798 iocmd.flags |= MPT_ICFLAG_TAGGED_CMD;
4799 else {
4800 if (hd->ioc->facts.FWVersion.Word < 0x01000600)
4801 return 0;
4802
4803 if ((hd->ioc->facts.FWVersion.Word >= 0x01010000) &&
4804 (hd->ioc->facts.FWVersion.Word < 0x01010B00))
4805 return 0;
4806 }
4807 }
4808
4809 /* Prep cfg structure
4810 */
4811 cfg.pageAddr = (bus<<8) | id;
4812 cfg.hdr = NULL;
4813
4814 /* Prep SDP0 header
4815 */
4816 header0.PageVersion = ioc->spi_data.sdp0version;
4817 header0.PageLength = ioc->spi_data.sdp0length;
4818 header0.PageNumber = 0;
4819 header0.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
4820
4821 /* Prep SDP1 header
4822 */
4823 header1.PageVersion = ioc->spi_data.sdp1version;
4824 header1.PageLength = ioc->spi_data.sdp1length;
4825 header1.PageNumber = 1;
4826 header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
4827
4828 if (header0.PageLength & 1)
4829 dv_alloc = (header0.PageLength * 4) + 4;
4830
4831 dv_alloc += (2048 + (header1.PageLength * 4));
4832
4833 pDvBuf = pci_alloc_consistent(ioc->pcidev, dv_alloc, &dvbuf_dma);
4834 if (pDvBuf == NULL)
4835 return 0;
4836
4837 sz = 0;
4838 pbuf1 = (u8 *)pDvBuf;
4839 buf1_dma = dvbuf_dma;
4840 sz +=1024;
4841
4842 pbuf2 = (u8 *) (pDvBuf + sz);
4843 buf2_dma = dvbuf_dma + sz;
4844 sz +=1024;
4845
4846 pcfg0Data = (SCSIDevicePage0_t *) (pDvBuf + sz);
4847 cfg0_dma_addr = dvbuf_dma + sz;
4848 sz += header0.PageLength * 4;
4849
4850 /* 8-byte alignment
4851 */
4852 if (header0.PageLength & 1)
4853 sz += 4;
4854
4855 pcfg1Data = (SCSIDevicePage1_t *) (pDvBuf + sz);
4856 cfg1_dma_addr = dvbuf_dma + sz;
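	/* Resulting layout of the single pDvBuf DMA allocation:
	 *   offset 0    : pbuf1 (1024 bytes, inquiry/write-buffer data)
	 *   offset 1024 : pbuf2 (1024 bytes, inquiry/read-buffer data)
	 *   offset 2048 : SCSI Device Page 0, padded to an 8-byte boundary,
	 *                 followed by SCSI Device Page 1.
	 */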
4857
4858 /* Skip this ID? Set cfg.hdr to force config page write
4859 */
4860 {
4861 ScsiCfgData *pspi_data = &hd->ioc->spi_data;
4862 if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
4863 /* Set the factor from nvram */
4864 nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8;
4865 if (nfactor < pspi_data->minSyncFactor )
4866 nfactor = pspi_data->minSyncFactor;
4867
4868 if (!(pspi_data->nvram[id] & MPT_NVRAM_ID_SCAN_ENABLE) ||
4869 (pspi_data->PortFlags == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) ) {
4870
4871 ddvprintk((MYIOC_s_NOTE_FMT "DV Skipped: bus, id, lun (%d, %d, %d)\n",
4872 ioc->name, bus, id, lun));
4873
4874 dv.cmd = MPT_SET_MAX;
4875 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
4876 cfg.hdr = &header1;
4877
4878 /* Save the final negotiated settings to
4879 * SCSI device page 1.
4880 */
4881 cfg.physAddr = cfg1_dma_addr;
4882 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
4883 cfg.dir = 1;
4884 mpt_config(hd->ioc, &cfg);
4885 goto target_done;
4886 }
4887 }
4888 }
4889
4890	/* Finish iocmd initialization - hidden or visible disk? */
4891 if (ioc->spi_data.pIocPg3) {
4892 /* Search IOC page 3 for matching id
4893 */
4894 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
4895 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
4896
4897 while (numPDisk) {
4898 if (pPDisk->PhysDiskID == id) {
4899 /* match */
4900 iocmd.flags |= MPT_ICFLAG_PHYS_DISK;
4901 iocmd.physDiskNum = pPDisk->PhysDiskNum;
4902
4903 /* Quiesce the IM
4904 */
4905 if (mptscsih_do_raid(hd, MPI_RAID_ACTION_QUIESCE_PHYS_IO, &iocmd) < 0) {
4906					ddvprintk((MYIOC_s_ERR_FMT "RAID Quiesce FAILED!\n", ioc->name));
4907 goto target_done;
4908 }
4909 break;
4910 }
4911 pPDisk++;
4912 numPDisk--;
4913 }
4914 }
4915
4916	/* A RAID volume ID may double as a physical device ID. If this ID is
4917	 * a RAID volume but not also a physical disk, skip DV.
4918 */
4919 if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
4920 goto target_done;
4921
4922
4923 /* Basic Test.
4924 * Async & Narrow - Inquiry
4925 * Async & Narrow - Inquiry
4926 * Maximum transfer rate - Inquiry
4927 * Compare buffers:
4928	 *	If the buffers compare, the test is complete.
4929 * If miscompare and first pass, repeat
4930 * If miscompare and not first pass, fall back and repeat
4931 */
4932 hd->pLocal = NULL;
4933 readPage0 = 0;
4934 sz = SCSI_MAX_INQUIRY_BYTES;
4935 rc = MPT_SCANDV_GOOD;
4936 while (1) {
4937 ddvprintk((MYIOC_s_NOTE_FMT "DV: Start Basic test on id=%d\n", ioc->name, id));
4938 retcode = 0;
4939 dv.cmd = MPT_SET_MIN;
4940 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
4941
4942 cfg.hdr = &header1;
4943 cfg.physAddr = cfg1_dma_addr;
4944 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
4945 cfg.dir = 1;
4946 if (mpt_config(hd->ioc, &cfg) != 0)
4947 goto target_done;
4948
4949 /* Wide - narrow - wide workaround case
4950 */
4951 if ((rc == MPT_SCANDV_ISSUE_SENSE) && dv.max.width) {
4952 /* Send an untagged command to reset disk Qs corrupted
4953 * when a parity error occurs on a Request Sense.
4954 */
4955 if ((hd->ioc->facts.FWVersion.Word >= 0x01000600) ||
4956 ((hd->ioc->facts.FWVersion.Word >= 0x01010000) &&
4957 (hd->ioc->facts.FWVersion.Word < 0x01010B00)) ) {
4958
4959 iocmd.cmd = REQUEST_SENSE;
4960 iocmd.data_dma = buf1_dma;
4961 iocmd.data = pbuf1;
4962 iocmd.size = 0x12;
4963 if (mptscsih_do_cmd(hd, &iocmd) < 0)
4964 goto target_done;
4965 else {
4966 if (hd->pLocal == NULL)
4967 goto target_done;
4968 rc = hd->pLocal->completion;
4969 if ((rc == MPT_SCANDV_GOOD) || (rc == MPT_SCANDV_SENSE)) {
4970 dv.max.width = 0;
4971 doFallback = 0;
4972 } else
4973 goto target_done;
4974 }
4975 } else
4976 goto target_done;
4977 }
4978
4979 iocmd.cmd = INQUIRY;
4980 iocmd.data_dma = buf1_dma;
4981 iocmd.data = pbuf1;
4982 iocmd.size = sz;
4983 memset(pbuf1, 0x00, sz);
4984 if (mptscsih_do_cmd(hd, &iocmd) < 0)
4985 goto target_done;
4986 else {
4987 if (hd->pLocal == NULL)
4988 goto target_done;
4989 rc = hd->pLocal->completion;
4990 if (rc == MPT_SCANDV_GOOD) {
4991 if (hd->pLocal->scsiStatus == SAM_STAT_BUSY) {
4992 if ((iocmd.flags & MPT_ICFLAG_TAGGED_CMD) == 0)
4993 retcode = 1;
4994 else
4995 retcode = 0;
4996
4997 goto target_done;
4998 }
4999 } else if (rc == MPT_SCANDV_SENSE) {
5000 ;
5001 } else {
5002 /* If first command doesn't complete
5003 * with a good status or with a check condition,
5004 * exit.
5005 */
5006 goto target_done;
5007 }
5008 }
5009
5010 /* Reset the size for disks
5011 */
5012 inq0 = (*pbuf1) & 0x1F;
5013 if ((inq0 == 0) && pTarget && !pTarget->raidVolume) {
5014 sz = 0x40;
5015 iocmd.size = sz;
5016 }
5017
5018 /* Another GEM workaround. Check peripheral device type,
5019 * if PROCESSOR, quit DV.
5020 */
5021 if (inq0 == TYPE_PROCESSOR) {
5022 mptscsih_initTarget(hd,
5023 bus,
5024 id,
5025 lun,
5026 pbuf1,
5027 sz);
5028 goto target_done;
5029 }
5030
5031 if (inq0 > 0x08)
5032 goto target_done;
5033
5034 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5035 goto target_done;
5036
5037 if (sz == 0x40) {
5038 if ((pTarget->maxWidth == 1) && (pTarget->maxOffset) && (nfactor < 0x0A)
5039 && (pTarget->minSyncFactor > 0x09)) {
5040 if ((pbuf1[56] & 0x04) == 0)
5041 ;
5042 else if ((pbuf1[56] & 0x01) == 1) {
5043 pTarget->minSyncFactor =
5044 nfactor > MPT_ULTRA320 ? nfactor : MPT_ULTRA320;
5045 } else {
5046 pTarget->minSyncFactor =
5047 nfactor > MPT_ULTRA160 ? nfactor : MPT_ULTRA160;
5048 }
5049
5050 dv.max.factor = pTarget->minSyncFactor;
5051
5052 if ((pbuf1[56] & 0x02) == 0) {
5053 pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
5054 hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
5055 ddvprintk((MYIOC_s_NOTE_FMT
5056 "DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n",
5057 ioc->name, id, pbuf1[56]));
5058 }
5059 }
5060 }
5061
5062 if (doFallback)
5063 dv.cmd = MPT_FALLBACK;
5064 else
5065 dv.cmd = MPT_SET_MAX;
5066
5067 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
5068 if (mpt_config(hd->ioc, &cfg) != 0)
5069 goto target_done;
5070
5071 if ((!dv.now.width) && (!dv.now.offset))
5072 goto target_done;
5073
5074 iocmd.cmd = INQUIRY;
5075 iocmd.data_dma = buf2_dma;
5076 iocmd.data = pbuf2;
5077 iocmd.size = sz;
5078 memset(pbuf2, 0x00, sz);
5079 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5080 goto target_done;
5081 else if (hd->pLocal == NULL)
5082 goto target_done;
5083 else {
5084 /* Save the return code.
5085 * If this is the first pass,
5086 * read SCSI Device Page 0
5087 * and update the target max parameters.
5088 */
5089 rc = hd->pLocal->completion;
5090 doFallback = 0;
5091 if (rc == MPT_SCANDV_GOOD) {
5092 if (!readPage0) {
5093 u32 sdp0_info;
5094 u32 sdp0_nego;
5095
5096 cfg.hdr = &header0;
5097 cfg.physAddr = cfg0_dma_addr;
5098 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5099 cfg.dir = 0;
5100
5101 if (mpt_config(hd->ioc, &cfg) != 0)
5102 goto target_done;
5103
5104 sdp0_info = le32_to_cpu(pcfg0Data->Information) & 0x0E;
5105 sdp0_nego = (le32_to_cpu(pcfg0Data->NegotiatedParameters) & 0xFF00 ) >> 8;
5106
5107 /* Quantum and Fujitsu workarounds.
5108 * Quantum: PPR U320 -> PPR reply with Ultra2 and wide
5109 * Fujitsu: PPR U320 -> Msg Reject and Ultra2 and wide
5110					 * Restart with a request for U160.
5111 */
5112 if ((dv.now.factor == MPT_ULTRA320) && (sdp0_nego == MPT_ULTRA2)) {
5113 doFallback = 1;
5114 } else {
5115 dv.cmd = MPT_UPDATE_MAX;
5116 mptscsih_dv_parms(hd, &dv, (void *)pcfg0Data);
5117 /* Update the SCSI device page 1 area
5118 */
5119 pcfg1Data->RequestedParameters = pcfg0Data->NegotiatedParameters;
5120 readPage0 = 1;
5121 }
5122 }
5123
5124			/* Quantum workaround. Restart this test with the fallback
5125 * flag set.
5126 */
5127 if (doFallback == 0) {
5128 if (memcmp(pbuf1, pbuf2, sz) != 0) {
5129 if (!firstPass)
5130 doFallback = 1;
5131 } else {
5132 ddvprintk((MYIOC_s_NOTE_FMT
5133 "DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id));
5134 hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE;
5135 mptscsih_initTarget(hd,
5136 bus,
5137 id,
5138 lun,
5139 pbuf1,
5140 sz);
5141 break; /* test complete */
5142 }
5143 }
5144
5145
5146 } else if (rc == MPT_SCANDV_ISSUE_SENSE)
5147 doFallback = 1; /* set fallback flag */
5148 else if ((rc == MPT_SCANDV_DID_RESET) ||
5149 (rc == MPT_SCANDV_SENSE) ||
5150 (rc == MPT_SCANDV_FALLBACK))
5151 doFallback = 1; /* set fallback flag */
5152 else
5153 goto target_done;
5154
5155 firstPass = 0;
5156 }
5157 }
5158 ddvprintk((MYIOC_s_NOTE_FMT "DV: Basic test on id=%d completed OK.\n", ioc->name, id));
5159
5160 if (mpt_dv == 0)
5161 goto target_done;
5162
5163 inq0 = (*pbuf1) & 0x1F;
5164
5165 /* Continue only for disks
5166 */
5167 if (inq0 != 0)
5168 goto target_done;
5169
5170 if ( ioc->spi_data.PortFlags == MPI_SCSIPORTPAGE2_PORT_FLAGS_BASIC_DV_ONLY )
5171 goto target_done;
5172
5173 /* Start the Enhanced Test.
5174 * 0) issue TUR to clear out check conditions
5175 * 1) read capacity of echo (regular) buffer
5176 * 2) reserve device
5177 * 3) do write-read-compare data pattern test
5178 * 4) release
5179 * 5) update nego parms to target struct
5180 */
5181 cfg.hdr = &header1;
5182 cfg.physAddr = cfg1_dma_addr;
5183 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
5184 cfg.dir = 1;
5185
5186 iocmd.cmd = TEST_UNIT_READY;
5187 iocmd.data_dma = -1;
5188 iocmd.data = NULL;
5189 iocmd.size = 0;
5190 notDone = 1;
5191 while (notDone) {
5192 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5193 goto target_done;
5194
5195 if (hd->pLocal == NULL)
5196 goto target_done;
5197
5198 rc = hd->pLocal->completion;
5199 if (rc == MPT_SCANDV_GOOD)
5200 notDone = 0;
5201 else if (rc == MPT_SCANDV_SENSE) {
5202 u8 skey = hd->pLocal->sense[2] & 0x0F;
5203 u8 asc = hd->pLocal->sense[12];
5204 u8 ascq = hd->pLocal->sense[13];
5205 ddvprintk((MYIOC_s_INFO_FMT
5206 "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
5207 ioc->name, skey, asc, ascq));
5208
5209 if (skey == UNIT_ATTENTION)
5210 notDone++; /* repeat */
5211 else if ((skey == NOT_READY) &&
5212 (asc == 0x04)&&(ascq == 0x01)) {
5213 /* wait then repeat */
5214 mdelay (2000);
5215 notDone++;
5216 } else if ((skey == NOT_READY) && (asc == 0x3A)) {
5217 /* no medium, try read test anyway */
5218 notDone = 0;
5219 } else {
5220 /* All other errors are fatal.
5221 */
5222 ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.",
5223 ioc->name));
5224 goto target_done;
5225 }
5226 } else
5227 goto target_done;
5228 }
5229
5230 iocmd.cmd = READ_BUFFER;
5231 iocmd.data_dma = buf1_dma;
5232 iocmd.data = pbuf1;
5233 iocmd.size = 4;
5234 iocmd.flags |= MPT_ICFLAG_BUF_CAP;
5235
5236 dataBufSize = 0;
5237 echoBufSize = 0;
5238 for (patt = 0; patt < 2; patt++) {
5239 if (patt == 0)
5240 iocmd.flags |= MPT_ICFLAG_ECHO;
5241 else
5242 iocmd.flags &= ~MPT_ICFLAG_ECHO;
5243
5244 notDone = 1;
5245 while (notDone) {
5246 bufsize = 0;
5247
5248 /* If not ready after 8 trials,
5249 * give up on this device.
5250 */
5251 if (notDone > 8)
5252 goto target_done;
5253
5254 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5255 goto target_done;
5256 else if (hd->pLocal == NULL)
5257 goto target_done;
5258 else {
5259 rc = hd->pLocal->completion;
5260 ddvprintk(("ReadBuffer Comp Code %d", rc));
5261 ddvprintk((" buff: %0x %0x %0x %0x\n",
5262 pbuf1[0], pbuf1[1], pbuf1[2], pbuf1[3]));
5263
5264 if (rc == MPT_SCANDV_GOOD) {
5265 notDone = 0;
5266 if (iocmd.flags & MPT_ICFLAG_ECHO) {
5267 bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3];
5268 } else {
5269 bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3];
5270 }
5271 } else if (rc == MPT_SCANDV_SENSE) {
5272 u8 skey = hd->pLocal->sense[2] & 0x0F;
5273 u8 asc = hd->pLocal->sense[12];
5274 u8 ascq = hd->pLocal->sense[13];
5275 ddvprintk((MYIOC_s_INFO_FMT
5276 "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
5277 ioc->name, skey, asc, ascq));
5278 if (skey == ILLEGAL_REQUEST) {
5279 notDone = 0;
5280 } else if (skey == UNIT_ATTENTION) {
5281 notDone++; /* repeat */
5282 } else if ((skey == NOT_READY) &&
5283 (asc == 0x04)&&(ascq == 0x01)) {
5284 /* wait then repeat */
5285 mdelay (2000);
5286 notDone++;
5287 } else {
5288 /* All other errors are fatal.
5289 */
5290 ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.",
5291 ioc->name));
5292 goto target_done;
5293 }
5294 } else {
5295 /* All other errors are fatal
5296 */
5297 goto target_done;
5298 }
5299 }
5300 }
5301
5302 if (iocmd.flags & MPT_ICFLAG_ECHO)
5303 echoBufSize = bufsize;
5304 else
5305 dataBufSize = bufsize;
5306 }
5307 sz = 0;
5308 iocmd.flags &= ~MPT_ICFLAG_BUF_CAP;
5309
5310 /* Use echo buffers if possible,
5311 * Exit if both buffers are 0.
5312 */
5313 if (echoBufSize > 0) {
5314 iocmd.flags |= MPT_ICFLAG_ECHO;
5315 if (dataBufSize > 0)
5316 bufsize = min(echoBufSize, dataBufSize);
5317 else
5318 bufsize = echoBufSize;
5319 } else if (dataBufSize == 0)
5320 goto target_done;
5321
5322 ddvprintk((MYIOC_s_INFO_FMT "%s Buffer Capacity %d\n", ioc->name,
5323 (iocmd.flags & MPT_ICFLAG_ECHO) ? "Echo" : " ", bufsize));
5324
5325 /* Data buffers for write-read-compare test max 1K.
5326 */
5327 sz = min(bufsize, 1024);
5328
5329 /* --- loop ----
5330 * On first pass, always issue a reserve.
5331 * On additional loops, only if a reset has occurred.
5332 * iocmd.flags indicates if echo or regular buffer
5333 */
5334 for (patt = 0; patt < 4; patt++) {
5335 ddvprintk(("Pattern %d\n", patt));
5336 if ((iocmd.flags & MPT_ICFLAG_RESERVED) && (iocmd.flags & MPT_ICFLAG_DID_RESET)) {
5337 iocmd.cmd = TEST_UNIT_READY;
5338 iocmd.data_dma = -1;
5339 iocmd.data = NULL;
5340 iocmd.size = 0;
5341 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5342 goto target_done;
5343
5344 iocmd.cmd = RELEASE;
5345 iocmd.data_dma = -1;
5346 iocmd.data = NULL;
5347 iocmd.size = 0;
5348 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5349 goto target_done;
5350 else if (hd->pLocal == NULL)
5351 goto target_done;
5352 else {
5353 rc = hd->pLocal->completion;
5354 ddvprintk(("Release rc %d\n", rc));
5355 if (rc == MPT_SCANDV_GOOD)
5356 iocmd.flags &= ~MPT_ICFLAG_RESERVED;
5357 else
5358 goto target_done;
5359 }
5360 iocmd.flags &= ~MPT_ICFLAG_RESERVED;
5361 }
5362 iocmd.flags &= ~MPT_ICFLAG_DID_RESET;
5363
5364 repeat = 5;
5365 while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) {
5366 iocmd.cmd = RESERVE;
5367 iocmd.data_dma = -1;
5368 iocmd.data = NULL;
5369 iocmd.size = 0;
5370 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5371 goto target_done;
5372 else if (hd->pLocal == NULL)
5373 goto target_done;
5374 else {
5375 rc = hd->pLocal->completion;
5376 if (rc == MPT_SCANDV_GOOD) {
5377 iocmd.flags |= MPT_ICFLAG_RESERVED;
5378 } else if (rc == MPT_SCANDV_SENSE) {
5379 /* Wait if coming ready
5380 */
5381 u8 skey = hd->pLocal->sense[2] & 0x0F;
5382 u8 asc = hd->pLocal->sense[12];
5383 u8 ascq = hd->pLocal->sense[13];
5384 ddvprintk((MYIOC_s_INFO_FMT
5385 "DV: Reserve Failed: ", ioc->name));
5386 ddvprintk(("SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
5387 skey, asc, ascq));
5388
5389 if ((skey == NOT_READY) && (asc == 0x04)&&
5390 (ascq == 0x01)) {
5391 /* wait then repeat */
5392 mdelay (2000);
5393 notDone++;
5394 } else {
5395 ddvprintk((MYIOC_s_INFO_FMT
5396							"DV: Reserve Failed.", ioc->name));
5397 goto target_done;
5398 }
5399 } else {
5400					ddvprintk((MYIOC_s_INFO_FMT "DV: Reserve Failed.",
5401 ioc->name));
5402 goto target_done;
5403 }
5404 }
5405 }
5406
5407 mptscsih_fillbuf(pbuf1, sz, patt, 1);
5408 iocmd.cmd = WRITE_BUFFER;
5409 iocmd.data_dma = buf1_dma;
5410 iocmd.data = pbuf1;
5411 iocmd.size = sz;
5412 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5413 goto target_done;
5414 else if (hd->pLocal == NULL)
5415 goto target_done;
5416 else {
5417 rc = hd->pLocal->completion;
5418 if (rc == MPT_SCANDV_GOOD)
5419 ; /* Issue read buffer */
5420 else if (rc == MPT_SCANDV_DID_RESET) {
5421 /* If using echo buffers, reset to data buffers.
5422 * Else do Fallback and restart
5423 * this test (re-issue reserve
5424 * because of bus reset).
5425 */
5426 if ((iocmd.flags & MPT_ICFLAG_ECHO) && (dataBufSize >= bufsize)) {
5427 iocmd.flags &= ~MPT_ICFLAG_ECHO;
5428 } else {
5429 dv.cmd = MPT_FALLBACK;
5430 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
5431
5432 if (mpt_config(hd->ioc, &cfg) != 0)
5433 goto target_done;
5434
5435 if ((!dv.now.width) && (!dv.now.offset))
5436 goto target_done;
5437 }
5438
5439 iocmd.flags |= MPT_ICFLAG_DID_RESET;
5440 patt = -1;
5441 continue;
5442 } else if (rc == MPT_SCANDV_SENSE) {
5443 /* Restart data test if UA, else quit.
5444 */
5445 u8 skey = hd->pLocal->sense[2] & 0x0F;
5446 ddvprintk((MYIOC_s_INFO_FMT
5447 "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey,
5448 hd->pLocal->sense[12], hd->pLocal->sense[13]));
5449 if (skey == UNIT_ATTENTION) {
5450 patt = -1;
5451 continue;
5452 } else if (skey == ILLEGAL_REQUEST) {
5453 if (iocmd.flags & MPT_ICFLAG_ECHO) {
5454 if (dataBufSize >= bufsize) {
5455 iocmd.flags &= ~MPT_ICFLAG_ECHO;
5456 patt = -1;
5457 continue;
5458 }
5459 }
5460 goto target_done;
5461 }
5462 else
5463 goto target_done;
5464 } else {
5465 /* fatal error */
5466 goto target_done;
5467 }
5468 }
5469
5470 iocmd.cmd = READ_BUFFER;
5471 iocmd.data_dma = buf2_dma;
5472 iocmd.data = pbuf2;
5473 iocmd.size = sz;
5474 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5475 goto target_done;
5476 else if (hd->pLocal == NULL)
5477 goto target_done;
5478 else {
5479 rc = hd->pLocal->completion;
5480 if (rc == MPT_SCANDV_GOOD) {
5481 /* If buffers compare,
5482 * go to next pattern,
5483 * else, do a fallback and restart
5484 * data transfer test.
5485 */
5486 if (memcmp (pbuf1, pbuf2, sz) == 0) {
5487 ; /* goto next pattern */
5488 } else {
 5489 				/* On a miscompare with the echo buffer, fall back
 5490 				 * to the data buffer, if that buffer exists.
 5491 				 * On a miscompare with the data buffer, check the first
 5492 				 * 4 bytes: some devices return capacity. Exit in this case.
 5493 				 */
5494 if (iocmd.flags & MPT_ICFLAG_ECHO) {
5495 if (dataBufSize >= bufsize)
5496 iocmd.flags &= ~MPT_ICFLAG_ECHO;
5497 else
5498 goto target_done;
5499 } else {
5500 if (dataBufSize == (pbuf2[1]<<16 | pbuf2[2]<<8 | pbuf2[3])) {
5501 /* Argh. Device returning wrong data.
5502 * Quit DV for this device.
5503 */
5504 goto target_done;
5505 }
5506
5507 /* Had an actual miscompare. Slow down.*/
5508 dv.cmd = MPT_FALLBACK;
5509 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
5510
5511 if (mpt_config(hd->ioc, &cfg) != 0)
5512 goto target_done;
5513
5514 if ((!dv.now.width) && (!dv.now.offset))
5515 goto target_done;
5516 }
5517
5518 patt = -1;
5519 continue;
5520 }
5521 } else if (rc == MPT_SCANDV_DID_RESET) {
5522 /* Do Fallback and restart
5523 * this test (re-issue reserve
5524 * because of bus reset).
5525 */
5526 dv.cmd = MPT_FALLBACK;
5527 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
5528
5529 if (mpt_config(hd->ioc, &cfg) != 0)
5530 goto target_done;
5531
5532 if ((!dv.now.width) && (!dv.now.offset))
5533 goto target_done;
5534
5535 iocmd.flags |= MPT_ICFLAG_DID_RESET;
5536 patt = -1;
5537 continue;
5538 } else if (rc == MPT_SCANDV_SENSE) {
5539 /* Restart data test if UA, else quit.
5540 */
5541 u8 skey = hd->pLocal->sense[2] & 0x0F;
5542 ddvprintk((MYIOC_s_INFO_FMT
5543 "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey,
5544 hd->pLocal->sense[12], hd->pLocal->sense[13]));
5545 if (skey == UNIT_ATTENTION) {
5546 patt = -1;
5547 continue;
5548 }
5549 else
5550 goto target_done;
5551 } else {
5552 /* fatal error */
5553 goto target_done;
5554 }
5555 }
5556
5557 } /* --- end of patt loop ---- */
5558
5559target_done:
5560 if (iocmd.flags & MPT_ICFLAG_RESERVED) {
5561 iocmd.cmd = RELEASE;
5562 iocmd.data_dma = -1;
5563 iocmd.data = NULL;
5564 iocmd.size = 0;
5565 if (mptscsih_do_cmd(hd, &iocmd) < 0)
5566 printk(MYIOC_s_INFO_FMT "DV: Release failed. id %d",
5567 ioc->name, id);
5568 else if (hd->pLocal) {
5569 if (hd->pLocal->completion == MPT_SCANDV_GOOD)
5570 iocmd.flags &= ~MPT_ICFLAG_RESERVED;
5571 } else {
5572 printk(MYIOC_s_INFO_FMT "DV: Release failed. id %d",
5573 ioc->name, id);
5574 }
5575 }
5576
5577
 5578 	/* Set if cfg1_dma_addr contents are valid
5579 */
5580 if ((cfg.hdr != NULL) && (retcode == 0)){
5581 /* If disk, not U320, disable QAS
5582 */
5583 if ((inq0 == 0) && (dv.now.factor > MPT_ULTRA320)) {
5584 hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
5585 ddvprintk((MYIOC_s_NOTE_FMT
 5586 			 "noQas set because id=%d has factor=%x\n", ioc->name, id, dv.now.factor));
5587 }
5588
5589 dv.cmd = MPT_SAVE;
5590 mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
5591
5592 /* Double writes to SDP1 can cause problems,
5593 * skip save of the final negotiated settings to
5594 * SCSI device page 1.
5595 *
5596 cfg.hdr = &header1;
5597 cfg.physAddr = cfg1_dma_addr;
5598 cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
5599 cfg.dir = 1;
5600 mpt_config(hd->ioc, &cfg);
5601 */
5602 }
5603
5604 /* If this is a RAID Passthrough, enable internal IOs
5605 */
5606 if (iocmd.flags & MPT_ICFLAG_PHYS_DISK) {
5607 if (mptscsih_do_raid(hd, MPI_RAID_ACTION_ENABLE_PHYS_IO, &iocmd) < 0)
5608 ddvprintk((MYIOC_s_ERR_FMT "RAID Enable FAILED!\n", ioc->name));
5609 }
5610
5611 /* Done with the DV scan of the current target
5612 */
5613 if (pDvBuf)
5614 pci_free_consistent(ioc->pcidev, dv_alloc, pDvBuf, dvbuf_dma);
5615
5616 ddvtprintk((MYIOC_s_INFO_FMT "DV Done id=%d\n",
5617 ioc->name, id));
5618
5619 return retcode;
5620}
5621
5622/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5623/* mptscsih_dv_parms - perform a variety of operations on the
5624 * parameters used for negotiation.
5625 * @hd: Pointer to a SCSI host.
5626 * @dv: Pointer to a structure that contains the maximum and current
5627 * negotiated parameters.
5628 */
5629static void
5630mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5631{
5632 VirtDevice *pTarget;
5633 SCSIDevicePage0_t *pPage0;
5634 SCSIDevicePage1_t *pPage1;
5635 int val = 0, data, configuration;
5636 u8 width = 0;
5637 u8 offset = 0;
5638 u8 factor = 0;
5639 u8 negoFlags = 0;
5640 u8 cmd = dv->cmd;
5641 u8 id = dv->id;
5642
5643 switch (cmd) {
5644 case MPT_GET_NVRAM_VALS:
5645 ddvprintk((MYIOC_s_NOTE_FMT "Getting NVRAM: ",
5646 hd->ioc->name));
5647 /* Get the NVRAM values and save in tmax
 5648 		 * If not an LVD bus, the adapter minSyncFactor has
 5649 		 * already been throttled back.
5650 */
5651 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) {
5652 width = pTarget->maxWidth;
5653 offset = pTarget->maxOffset;
5654 factor = pTarget->minSyncFactor;
5655 negoFlags = pTarget->negoFlags;
5656 } else {
5657 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
5658 data = hd->ioc->spi_data.nvram[id];
5659 width = data & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
5660 if ((offset = hd->ioc->spi_data.maxSyncOffset) == 0)
5661 factor = MPT_ASYNC;
5662 else {
5663 factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
5664 if ((factor == 0) || (factor == MPT_ASYNC)){
5665 factor = MPT_ASYNC;
5666 offset = 0;
5667 }
5668 }
5669 } else {
5670 width = MPT_NARROW;
5671 offset = 0;
5672 factor = MPT_ASYNC;
5673 }
5674
5675 /* Set the negotiation flags */
5676 negoFlags = hd->ioc->spi_data.noQas;
5677 if (!width)
5678 negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
5679
5680 if (!offset)
5681 negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
5682 }
5683
5684 /* limit by adapter capabilities */
5685 width = min(width, hd->ioc->spi_data.maxBusWidth);
5686 offset = min(offset, hd->ioc->spi_data.maxSyncOffset);
5687 factor = max(factor, hd->ioc->spi_data.minSyncFactor);
5688
5689 /* Check Consistency */
5690 if (offset && (factor < MPT_ULTRA2) && !width)
5691 factor = MPT_ULTRA2;
5692
5693 dv->max.width = width;
5694 dv->max.offset = offset;
5695 dv->max.factor = factor;
5696 dv->max.flags = negoFlags;
5697 ddvprintk((" id=%d width=%d factor=%x offset=%x flags=%x\n",
5698 id, width, factor, offset, negoFlags));
5699 break;
5700
5701 case MPT_UPDATE_MAX:
5702 ddvprintk((MYIOC_s_NOTE_FMT
5703 "Updating with SDP0 Data: ", hd->ioc->name));
5704 /* Update tmax values with those from Device Page 0.*/
5705 pPage0 = (SCSIDevicePage0_t *) pPage;
5706 if (pPage0) {
5707 val = cpu_to_le32(pPage0->NegotiatedParameters);
5708 dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0;
5709 dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16;
5710 dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
5711 }
5712
5713 dv->now.width = dv->max.width;
5714 dv->now.offset = dv->max.offset;
5715 dv->now.factor = dv->max.factor;
5716 ddvprintk(("id=%d width=%d factor=%x offset=%x flags=%x\n",
5717 id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags));
5718 break;
5719
5720 case MPT_SET_MAX:
5721 ddvprintk((MYIOC_s_NOTE_FMT "Setting Max: ",
5722 hd->ioc->name));
5723 /* Set current to the max values. Update the config page.*/
5724 dv->now.width = dv->max.width;
5725 dv->now.offset = dv->max.offset;
5726 dv->now.factor = dv->max.factor;
5727 dv->now.flags = dv->max.flags;
5728
5729 pPage1 = (SCSIDevicePage1_t *)pPage;
5730 if (pPage1) {
5731 mptscsih_setDevicePage1Flags (dv->now.width, dv->now.factor,
5732 dv->now.offset, &val, &configuration, dv->now.flags);
5733 dnegoprintk(("Setting Max: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
5734 id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
5735 pPage1->RequestedParameters = le32_to_cpu(val);
5736 pPage1->Reserved = 0;
5737 pPage1->Configuration = le32_to_cpu(configuration);
5738 }
5739
5740 ddvprintk(("id=%d width=%d factor=%x offset=%x flags=%x request=%x configuration=%x\n",
5741 id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
5742 break;
5743
5744 case MPT_SET_MIN:
5745 ddvprintk((MYIOC_s_NOTE_FMT "Setting Min: ",
5746 hd->ioc->name));
5747 /* Set page to asynchronous and narrow
5748 * Do not update now, breaks fallback routine. */
5749 width = MPT_NARROW;
5750 offset = 0;
5751 factor = MPT_ASYNC;
5752 negoFlags = dv->max.flags;
5753
5754 pPage1 = (SCSIDevicePage1_t *)pPage;
5755 if (pPage1) {
5756 mptscsih_setDevicePage1Flags (width, factor,
5757 offset, &val, &configuration, negoFlags);
5758 dnegoprintk(("Setting Min: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
5759 id, width, factor, offset, negoFlags, val, configuration));
5760 pPage1->RequestedParameters = le32_to_cpu(val);
5761 pPage1->Reserved = 0;
5762 pPage1->Configuration = le32_to_cpu(configuration);
5763 }
5764 ddvprintk(("id=%d width=%d factor=%x offset=%x request=%x config=%x negoFlags=%x\n",
5765 id, width, factor, offset, val, configuration, negoFlags));
5766 break;
5767
5768 case MPT_FALLBACK:
5769 ddvprintk((MYIOC_s_NOTE_FMT
5770 "Fallback: Start: offset %d, factor %x, width %d \n",
5771 hd->ioc->name, dv->now.offset,
5772 dv->now.factor, dv->now.width));
5773 width = dv->now.width;
5774 offset = dv->now.offset;
5775 factor = dv->now.factor;
5776 if ((offset) && (dv->max.width)) {
5777 if (factor < MPT_ULTRA160)
5778 factor = MPT_ULTRA160;
5779 else if (factor < MPT_ULTRA2) {
5780 factor = MPT_ULTRA2;
5781 width = MPT_WIDE;
5782 } else if ((factor == MPT_ULTRA2) && width) {
5783 factor = MPT_ULTRA2;
5784 width = MPT_NARROW;
5785 } else if (factor < MPT_ULTRA) {
5786 factor = MPT_ULTRA;
5787 width = MPT_WIDE;
5788 } else if ((factor == MPT_ULTRA) && width) {
5789 width = MPT_NARROW;
5790 } else if (factor < MPT_FAST) {
5791 factor = MPT_FAST;
5792 width = MPT_WIDE;
5793 } else if ((factor == MPT_FAST) && width) {
5794 factor = MPT_FAST;
5795 width = MPT_NARROW;
5796 } else if (factor < MPT_SCSI) {
5797 factor = MPT_SCSI;
5798 width = MPT_WIDE;
5799 } else if ((factor == MPT_SCSI) && width) {
5800 factor = MPT_SCSI;
5801 width = MPT_NARROW;
5802 } else {
5803 factor = MPT_ASYNC;
5804 offset = 0;
5805 }
5806
5807 } else if (offset) {
5808 width = MPT_NARROW;
5809 if (factor < MPT_ULTRA)
5810 factor = MPT_ULTRA;
5811 else if (factor < MPT_FAST)
5812 factor = MPT_FAST;
5813 else if (factor < MPT_SCSI)
5814 factor = MPT_SCSI;
5815 else {
5816 factor = MPT_ASYNC;
5817 offset = 0;
5818 }
5819
5820 } else {
5821 width = MPT_NARROW;
5822 factor = MPT_ASYNC;
5823 }
5824 dv->max.flags |= MPT_TARGET_NO_NEGO_QAS;
5825 dv->max.flags &= ~MPT_TAPE_NEGO_IDP;
5826
5827 dv->now.width = width;
5828 dv->now.offset = offset;
5829 dv->now.factor = factor;
5830 dv->now.flags = dv->max.flags;
5831
5832 pPage1 = (SCSIDevicePage1_t *)pPage;
5833 if (pPage1) {
5834 mptscsih_setDevicePage1Flags (width, factor, offset, &val,
5835 &configuration, dv->now.flags);
5836 dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x flags=%x request=%x config=%x\n",
5837 id, width, offset, factor, dv->now.flags, val, configuration));
5838
5839 pPage1->RequestedParameters = le32_to_cpu(val);
5840 pPage1->Reserved = 0;
5841 pPage1->Configuration = le32_to_cpu(configuration);
5842 }
5843
5844 ddvprintk(("Finish: id=%d offset=%d factor=%x width=%d request=%x config=%x\n",
5845 id, dv->now.offset, dv->now.factor, dv->now.width, val, configuration));
5846 break;
5847
5848 case MPT_SAVE:
5849 ddvprintk((MYIOC_s_NOTE_FMT
5850 "Saving to Target structure: ", hd->ioc->name));
5851 ddvprintk(("id=%d width=%x factor=%x offset=%d flags=%x\n",
5852 id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags));
5853
5854 /* Save these values to target structures
5855 * or overwrite nvram (phys disks only).
5856 */
5857
5858 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume ) {
5859 pTarget->maxWidth = dv->now.width;
5860 pTarget->maxOffset = dv->now.offset;
5861 pTarget->minSyncFactor = dv->now.factor;
5862 pTarget->negoFlags = dv->now.flags;
5863 } else {
 5864 			/* Preserve all flags, use a
 5865 			 * read-modify-write algorithm
5866 */
5867 if (hd->ioc->spi_data.nvram) {
5868 data = hd->ioc->spi_data.nvram[id];
5869
5870 if (dv->now.width)
5871 data &= ~MPT_NVRAM_WIDE_DISABLE;
5872 else
5873 data |= MPT_NVRAM_WIDE_DISABLE;
5874
5875 if (!dv->now.offset)
5876 factor = MPT_ASYNC;
5877
5878 data &= ~MPT_NVRAM_SYNC_MASK;
5879 data |= (dv->now.factor << MPT_NVRAM_SYNC_SHIFT) & MPT_NVRAM_SYNC_MASK;
5880
5881 hd->ioc->spi_data.nvram[id] = data;
5882 }
5883 }
5884 break;
5885 }
5886}
5887
5888/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5889/* mptscsih_fillbuf - fill a buffer with a special data pattern
 5890 *	For bus scan only.
5891 *
5892 * @buffer: Pointer to data buffer to be filled.
5893 * @size: Number of bytes to fill
5894 * @index: Pattern index
5895 * @width: bus width, 0 (8 bits) or 1 (16 bits)
5896 */
5897static void
5898mptscsih_fillbuf(char *buffer, int size, int index, int width)
5899{
5900 char *ptr = buffer;
5901 int ii;
5902 char byte;
5903 short val;
5904
5905 switch (index) {
5906 case 0:
5907
5908 if (width) {
5909 /* Pattern: 0000 FFFF 0000 FFFF
5910 */
5911 for (ii=0; ii < size; ii++, ptr++) {
5912 if (ii & 0x02)
5913 *ptr = 0xFF;
5914 else
5915 *ptr = 0x00;
5916 }
5917 } else {
5918 /* Pattern: 00 FF 00 FF
5919 */
5920 for (ii=0; ii < size; ii++, ptr++) {
5921 if (ii & 0x01)
5922 *ptr = 0xFF;
5923 else
5924 *ptr = 0x00;
5925 }
5926 }
5927 break;
5928
5929 case 1:
5930 if (width) {
5931 /* Pattern: 5555 AAAA 5555 AAAA 5555
5932 */
5933 for (ii=0; ii < size; ii++, ptr++) {
5934 if (ii & 0x02)
5935 *ptr = 0xAA;
5936 else
5937 *ptr = 0x55;
5938 }
5939 } else {
5940 /* Pattern: 55 AA 55 AA 55
5941 */
5942 for (ii=0; ii < size; ii++, ptr++) {
5943 if (ii & 0x01)
5944 *ptr = 0xAA;
5945 else
5946 *ptr = 0x55;
5947 }
5948 }
5949 break;
5950
5951 case 2:
5952 /* Pattern: 00 01 02 03 04 05
5953 * ... FE FF 00 01..
5954 */
5955 for (ii=0; ii < size; ii++, ptr++)
5956 *ptr = (char) ii;
5957 break;
5958
5959 case 3:
5960 if (width) {
5961 /* Wide Pattern: FFFE 0001 FFFD 0002
5962 * ... 4000 DFFF 8000 EFFF
5963 */
5964 byte = 0;
5965 for (ii=0; ii < size/2; ii++) {
5966 /* Create the base pattern
5967 */
5968 val = (1 << byte);
5969 /* every 64 (0x40) bytes flip the pattern
5970 * since we fill 2 bytes / iteration,
 5971 				 * test bit 0x20 of ii
5972 */
5973 if (ii & 0x20)
5974 val = ~(val);
5975
5976 if (ii & 0x01) {
5977 *ptr = (char)( (val & 0xFF00) >> 8);
5978 ptr++;
5979 *ptr = (char)(val & 0xFF);
5980 byte++;
5981 byte &= 0x0F;
5982 } else {
5983 val = ~val;
5984 *ptr = (char)( (val & 0xFF00) >> 8);
5985 ptr++;
5986 *ptr = (char)(val & 0xFF);
5987 }
5988
5989 ptr++;
5990 }
5991 } else {
5992 /* Narrow Pattern: FE 01 FD 02 FB 04
5993 * .. 7F 80 01 FE 02 FD ... 80 7F
5994 */
5995 byte = 0;
5996 for (ii=0; ii < size; ii++, ptr++) {
5997 /* Base pattern - first 32 bytes
5998 */
5999 if (ii & 0x01) {
6000 *ptr = (1 << byte);
6001 byte++;
6002 byte &= 0x07;
6003 } else {
6004 *ptr = (char) (~(1 << byte));
6005 }
6006
6007 /* Flip the pattern every 32 bytes
6008 */
6009 if (ii & 0x20)
6010 *ptr = ~(*ptr);
6011 }
6012 }
6013 break;
6014 }
6015}
6016#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */
6017
6018/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6019
6020module_init(mptscsih_init);
6021module_exit(mptscsih_exit);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
new file mode 100644
index 000000000000..5cb2fd45c38f
--- /dev/null
+++ b/drivers/message/fusion/mptscsih.h
@@ -0,0 +1,94 @@
1/*
2 * linux/drivers/message/fusion/mptscsih.h
3 * High performance SCSI / Fibre Channel SCSI Host device driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7 *
8 * Credits:
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
11 *
12 * A huge debt of gratitude is owed to David S. Miller (DaveM)
13 * for fixing much of the stupid and broken stuff in the early
14 * driver while porting to sparc64 platform. THANK YOU!
15 *
16 * (see also mptbase.c)
17 *
18 * Copyright (c) 1999-2004 LSI Logic Corporation
19 * Originally By: Steven J. Ralston
20 * (mailto:netscape.net)
21 * (mailto:mpt_linux_developer@lsil.com)
22 *
23 * $Id: mptscsih.h,v 1.21 2002/12/03 21:26:35 pdelaney Exp $
24 */
25/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
26/*
27 This program is free software; you can redistribute it and/or modify
28 it under the terms of the GNU General Public License as published by
29 the Free Software Foundation; version 2 of the License.
30
31 This program is distributed in the hope that it will be useful,
32 but WITHOUT ANY WARRANTY; without even the implied warranty of
33 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34 GNU General Public License for more details.
35
36 NO WARRANTY
37 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
38 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
39 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
40 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
41 solely responsible for determining the appropriateness of using and
42 distributing the Program and assumes all risks associated with its
43 exercise of rights under this Agreement, including but not limited to
44 the risks and costs of program errors, damage to or loss of data,
45 programs or equipment, and unavailability or interruption of operations.
46
47 DISCLAIMER OF LIABILITY
48 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
49 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
51 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
52 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
53 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
54 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
55
56 You should have received a copy of the GNU General Public License
57 along with this program; if not, write to the Free Software
58 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
59*/
60
61#ifndef SCSIHOST_H_INCLUDED
62#define SCSIHOST_H_INCLUDED
63
64/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
65/*
66 * SCSI Public stuff...
67 */
68
69#define MPT_SCSI_CMD_PER_DEV_HIGH 31
70#define MPT_SCSI_CMD_PER_DEV_LOW 7
71
72#define MPT_SCSI_CMD_PER_LUN 7
73
74#define MPT_SCSI_MAX_SECTORS 8192
75
  76/* To disable domain validation, comment out the
77 * following line. No effect for FC devices.
78 * For SCSI devices, driver will negotiate to
79 * NVRAM settings (if available) or to maximum adapter
80 * capabilities.
81 */
82
83#define MPTSCSIH_ENABLE_DOMAIN_VALIDATION
84
85
86/* SCSI driver setup structure. Settings can be overridden
87 * by command line options.
88 */
89#define MPTSCSIH_DOMAIN_VALIDATION 1
90#define MPTSCSIH_MAX_WIDTH 1
91#define MPTSCSIH_MIN_SYNC 0x08
92#define MPTSCSIH_SAF_TE 0
93
94#endif
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
new file mode 100644
index 000000000000..8d132b0d6b12
--- /dev/null
+++ b/drivers/message/i2o/Kconfig
@@ -0,0 +1,75 @@
1
2menu "I2O device support"
3
4config I2O
5 tristate "I2O support"
6 depends on PCI
7 ---help---
8 The Intelligent Input/Output (I2O) architecture allows hardware
9 drivers to be split into two parts: an operating system specific
  10	  module called the OSM and a hardware specific module called the
  11	  HDM. The OSM can talk to a whole range of HDMs, and ideally the
  12	  HDMs are not OS dependent. This allows for the same HDM driver to
13 be used under different operating systems if the relevant OSM is in
14 place. In order for this to work, you need to have an I2O interface
15 adapter card in your computer. This card contains a special I/O
16 processor (IOP), thus allowing high speeds since the CPU does not
17 have to deal with I/O.
18
19 If you say Y here, you will get a choice of interface adapter
20 drivers and OSM's with the following questions.
21
22 To compile this support as a module, choose M here: the
  23	  module will be called i2o_core.
24
25 If unsure, say N.
26
27config I2O_CONFIG
28 tristate "I2O Configuration support"
29 depends on PCI && I2O
30 help
31 Say Y for support of the configuration interface for the I2O adapters.
32 If you have a RAID controller from Adaptec and you want to use the
33 raidutils to manage your RAID array, you have to say Y here.
34
35 To compile this support as a module, choose M here: the
36 module will be called i2o_config.
37
38config I2O_BLOCK
39 tristate "I2O Block OSM"
40 depends on I2O
41 help
42 Include support for the I2O Block OSM. The Block OSM presents disk
43 and other structured block devices to the operating system. If you
  44	  are using a RAID controller, you can access the array only through
  45	  the Block OSM driver, but the individual disks can still be accessed
  46	  through the SCSI OSM driver, for example to monitor them.
47
48 To compile this support as a module, choose M here: the
49 module will be called i2o_block.
50
51config I2O_SCSI
52 tristate "I2O SCSI OSM"
53 depends on I2O && SCSI
54 help
55 Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel
56 I2O controller. You can use both the SCSI and Block OSM together if
57 you wish. To access a RAID array, you must use the Block OSM driver.
  58	  You can still use the SCSI OSM driver to monitor the individual disks.
59
60 To compile this support as a module, choose M here: the
61 module will be called i2o_scsi.
62
63config I2O_PROC
64 tristate "I2O /proc support"
65 depends on I2O
66 help
67 If you say Y here and to "/proc file system support", you will be
68 able to read I2O related information from the virtual directory
69 /proc/i2o.
70
71 To compile this support as a module, choose M here: the
72 module will be called i2o_proc.
73
74endmenu
75
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
new file mode 100644
index 000000000000..aabc6cdc3fce
--- /dev/null
+++ b/drivers/message/i2o/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the kernel I2O OSM.
3#
4# Note : at this point, these files are compiled on all systems.
5# In the future, some of these should be built conditionally.
6#
7
8i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o
9obj-$(CONFIG_I2O) += i2o_core.o
10obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o
11obj-$(CONFIG_I2O_BLOCK) += i2o_block.o
12obj-$(CONFIG_I2O_SCSI) += i2o_scsi.o
13obj-$(CONFIG_I2O_PROC) += i2o_proc.o
diff --git a/drivers/message/i2o/README b/drivers/message/i2o/README
new file mode 100644
index 000000000000..a81f851f7b5d
--- /dev/null
+++ b/drivers/message/i2o/README
@@ -0,0 +1,98 @@
1
2 Linux I2O Support (c) Copyright 1999 Red Hat Software
3 and others.
4
5 This program is free software; you can redistribute it and/or
6 modify it under the terms of the GNU General Public License
7 as published by the Free Software Foundation; either version
8 2 of the License, or (at your option) any later version.
9
10AUTHORS (so far)
11
12Alan Cox, Building Number Three Ltd.
13 Core code, SCSI and Block OSMs
14
15Steve Ralston, LSI Logic Corp.
16 Debugging SCSI and Block OSM
17
18Deepak Saxena, Intel Corp.
19 Various core/block extensions
20 /proc interface, bug fixes
21 Ioctl interfaces for control
22 Debugging LAN OSM
23
24Philip Rumpf
25 Fixed assorted dumb SMP locking bugs
26
27Juha Sievanen, University of Helsinki Finland
28 LAN OSM code
29 /proc interface to LAN class
30 Bug fixes
31 Core code extensions
32
33Auvo Häkkinen, University of Helsinki Finland
34 LAN OSM code
35 /Proc interface to LAN class
36 Bug fixes
37 Core code extensions
38
39Taneli Vähäkangas, University of Helsinki Finland
40 Fixes to i2o_config
41
42CREDITS
43
44 This work was made possible by
45
46Red Hat Software
47 Funding for the Building #3 part of the project
48
49Symbios Logic (Now LSI)
50 Host adapters, hints, known to work platforms when I hit
51 compatibility problems
52
53BoxHill Corporation
54 Loan of initial FibreChannel disk array used for development work.
55
  56European Commission
57 Funding the work done by the University of Helsinki
58
59SysKonnect
60 Loan of FDDI and Gigabit Ethernet cards
61
62ASUSTeK
63 Loan of I2O motherboard
64
65STATUS:
66
67o The core setup works within limits.
68o The scsi layer seems to almost work.
69 I'm still chasing down the hang bug.
70o The block OSM is mostly functional
71o LAN OSM works with FDDI and Ethernet cards.
72
73TO DO:
74
75General:
76o Provide hidden address space if asked
77o Long term message flow control
78o PCI IOP's without interrupts are not supported yet
79o Push FAIL handling into the core
80o DDM control interfaces for module load etc
  81o	Add I2O 2.0 support (Deferred to 2.5 kernel)
82
83Block:
84o Multiple major numbers
85o Read ahead and cache handling stuff. Talk to Ingo and people
86o Power management
87o Finish Media changers
88
89SCSI:
90o Find the right way to associate drives/luns/busses
91
92Lan:
93o Performance tuning
94o Test Fibre Channel code
95
96Tape:
97o Anyone seen anything implementing this ?
98 (D.S: Will attempt to do so if spare cycles permit)
diff --git a/drivers/message/i2o/README.ioctl b/drivers/message/i2o/README.ioctl
new file mode 100644
index 000000000000..73dd084c0e98
--- /dev/null
+++ b/drivers/message/i2o/README.ioctl
@@ -0,0 +1,394 @@
1
2Linux I2O User Space Interface
3rev 0.3 - 04/20/99
4
5=============================================================================
6Originally written by Deepak Saxena(deepak@plexity.net)
7Currently maintained by Deepak Saxena(deepak@plexity.net)
8=============================================================================
9
10I. Introduction
11
12The Linux I2O subsystem provides a set of ioctl() commands that can be
13utilized by user space applications to communicate with IOPs and devices
14on individual IOPs. This document defines the specific ioctl() commands
15that are available to the user and provides examples of their uses.
16
17This document assumes the reader is familiar with or has access to the
18I2O specification as no I2O message parameters are outlined. For information
19on the specification, see http://www.i2osig.org
20
21This document and the I2O user space interface are currently maintained
22by Deepak Saxena. Please send all comments, errata, and bug fixes to
23deepak@csociety.purdue.edu
24
25II. IOP Access
26
27Access to the I2O subsystem is provided through the device file named
28/dev/i2o/ctl. This file is a character file with major number 10 and minor
29number 166. It can be created through the following command:
30
31 mknod /dev/i2o/ctl c 10 166
32
33III. Determining the IOP Count
34
35 SYNOPSIS
36
37 ioctl(fd, I2OGETIOPS, int *count);
38
39 u8 count[MAX_I2O_CONTROLLERS];
40
41 DESCRIPTION
42
43 This function returns the system's active IOP table. count should
44 point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon
45 returning, each entry will contain a non-zero value if the given
  46	IOP unit is active, and zero if it is inactive or non-existent.
47
48 RETURN VALUE.
49
50 Returns 0 if no errors occur, and -1 otherwise. If an error occurs,
51 errno is set appropriately:
52
53 EFAULT Invalid user space pointer was passed
54
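 EXAMPLE

	A minimal user-space sketch of this call. It assumes the ioctl number
	and MAX_I2O_CONTROLLERS are available from a user header, shown here
	as <linux/i2o-dev.h>; the header path and the error handling style are
	assumptions of this sketch, not part of the interface definition.

	    #include <stdio.h>
	    #include <fcntl.h>
	    #include <unistd.h>
	    #include <sys/ioctl.h>
	    #include <linux/i2o-dev.h>	/* I2OGETIOPS, MAX_I2O_CONTROLLERS (assumed) */

	    int main(void)
	    {
	        unsigned char count[MAX_I2O_CONTROLLERS];	/* the text's u8 */
	        int fd, i;

	        fd = open("/dev/i2o/ctl", O_RDWR);
	        if (fd < 0) {
	            perror("open /dev/i2o/ctl");
	            return 1;
	        }

	        if (ioctl(fd, I2OGETIOPS, count) < 0) {
	            perror("I2OGETIOPS");
	            close(fd);
	            return 1;
	        }

	        /* Non-zero entries mark active IOP units */
	        for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
	            if (count[i])
	                printf("IOP %d is active\n", i);

	        close(fd);
	        return 0;
	    }
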
55IV. Getting Hardware Resource Table
56
57 SYNOPSIS
58
59 ioctl(fd, I2OHRTGET, struct i2o_cmd_hrt *hrt);
60
61 struct i2o_cmd_hrtlct
62 {
63 u32 iop; /* IOP unit number */
64 void *resbuf; /* Buffer for result */
65 u32 *reslen; /* Buffer length in bytes */
66 };
67
68 DESCRIPTION
69
70 This function returns the Hardware Resource Table of the IOP specified
71 by hrt->iop in the buffer pointed to by hrt->resbuf. The actual size of
72 the data is written into *(hrt->reslen).
73
74 RETURNS
75
76 This function returns 0 if no errors occur. If an error occurs, -1
77 is returned and errno is set appropriately:
78
79 EFAULT Invalid user space pointer was passed
80 ENXIO Invalid IOP number
81 ENOBUFS Buffer not large enough. If this occurs, the required
82 buffer length is written into *(hrt->reslen)
83
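 EXAMPLE

	A sketch of fetching the HRT of one IOP. It assumes fd is an open
	descriptor on /dev/i2o/ctl, the i2o_cmd_hrtlct layout shown above, and
	a user header named <linux/i2o-dev.h>; the helper name is illustrative,
	and treating *reslen as an in/out buffer size is an assumption.

	    #include <errno.h>
	    #include <stdio.h>
	    #include <sys/ioctl.h>
	    #include <linux/i2o-dev.h>	/* I2OHRTGET, struct i2o_cmd_hrtlct (assumed) */

	    /* Fetch the HRT of IOP 'unit' into buf (buflen bytes).
	     * Returns the actual HRT size in bytes, or 0 on error. */
	    static unsigned int get_hrt(int fd, unsigned int unit,
	                                void *buf, unsigned int buflen)
	    {
	        struct i2o_cmd_hrtlct hrt;
	        unsigned int len = buflen;	/* in: buffer size, out: actual size */

	        hrt.iop = unit;
	        hrt.resbuf = buf;
	        hrt.reslen = &len;

	        if (ioctl(fd, I2OHRTGET, &hrt) < 0) {
	            if (errno == ENOBUFS)	/* len now holds the required size */
	                fprintf(stderr, "HRT needs %u bytes\n", len);
	            else
	                perror("I2OHRTGET");
	            return 0;
	        }
	        return len;
	    }
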
84V. Getting Logical Configuration Table
85
86 SYNOPSIS
87
88 ioctl(fd, I2OLCTGET, struct i2o_cmd_lct *lct);
89
90 struct i2o_cmd_hrtlct
91 {
92 u32 iop; /* IOP unit number */
93 void *resbuf; /* Buffer for result */
94 u32 *reslen; /* Buffer length in bytes */
95 };
96
97 DESCRIPTION
98
99 This function returns the Logical Configuration Table of the IOP specified
100 by lct->iop in the buffer pointed to by lct->resbuf. The actual size of
101 the data is written into *(lct->reslen).
102
103 RETURNS
104
105 This function returns 0 if no errors occur. If an error occurs, -1
106 is returned and errno is set appropriately:
107
108 EFAULT Invalid user space pointer was passed
109 ENXIO Invalid IOP number
110 ENOBUFS Buffer not large enough. If this occurs, the required
111 buffer length is written into *(lct->reslen)
112
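 EXAMPLE

	The LCT call follows the same pattern as the get_hrt() sketch in the
	previous section (same includes); only the request code changes. The
	helper name is again illustrative.

	    /* Fetch the LCT of IOP 'unit'; returns its size in bytes, or 0. */
	    static unsigned int get_lct(int fd, unsigned int unit,
	                                void *buf, unsigned int buflen)
	    {
	        struct i2o_cmd_hrtlct lct;
	        unsigned int len = buflen;

	        lct.iop = unit;
	        lct.resbuf = buf;
	        lct.reslen = &len;
	        if (ioctl(fd, I2OLCTGET, &lct) < 0)
	            return 0;		/* errno set as documented above */
	        return len;
	    }
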
 113VI. Setting Parameters
114
115 SYNOPSIS
116
117 ioctl(fd, I2OPARMSET, struct i2o_parm_setget *ops);
118
119 struct i2o_cmd_psetget
120 {
121 u32 iop; /* IOP unit number */
122 u32 tid; /* Target device TID */
123 void *opbuf; /* Operation List buffer */
124 u32 oplen; /* Operation List buffer length in bytes */
125 void *resbuf; /* Result List buffer */
126 u32 *reslen; /* Result List buffer length in bytes */
127 };
128
129 DESCRIPTION
130
131 This function posts a UtilParamsSet message to the device identified
132 by ops->iop and ops->tid. The operation list for the message is
133 sent through the ops->opbuf buffer, and the result list is written
134 into the buffer pointed to by ops->resbuf. The number of bytes
135 written is placed into *(ops->reslen).
136
137 RETURNS
138
139 The return value is the size in bytes of the data written into
140 ops->resbuf if no errors occur. If an error occurs, -1 is returned
 141	and errno is set appropriately:
142
143 EFAULT Invalid user space pointer was passed
144 ENXIO Invalid IOP number
145 ENOBUFS Buffer not large enough. If this occurs, the required
146 buffer length is written into *(ops->reslen)
147 ETIMEDOUT Timeout waiting for reply message
148 ENOMEM Kernel memory allocation error
149
150 A return value of 0 does not mean that the value was actually
151 changed properly on the IOP. The user should check the result
152 list to determine the specific status of the transaction.
153
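 EXAMPLE

	A sketch of posting a UtilParamsSet through this interface. The
	operation list contents are defined by the I2O specification and are
	passed through as an opaque blob here; the i2o_cmd_psetget declaration
	shown above is used (the synopsis name i2o_parm_setget refers to the
	same layout), and the helper name and header path are assumptions.

	    #include <sys/ioctl.h>
	    #include <linux/i2o-dev.h>	/* I2OPARMSET, struct i2o_cmd_psetget (assumed) */

	    /* Post a UtilParamsSet to device 'tid' on IOP 'unit'.  On success the
	     * return value is the number of result-list bytes written to result. */
	    static int params_set(int fd, unsigned int unit, unsigned int tid,
	                          void *oplist, unsigned int oplen,
	                          void *result, unsigned int resmax)
	    {
	        struct i2o_cmd_psetget ops;
	        unsigned int reslen = resmax;

	        ops.iop = unit;
	        ops.tid = tid;
	        ops.opbuf = oplist;	/* operation list, laid out per the I2O spec */
	        ops.oplen = oplen;
	        ops.resbuf = result;
	        ops.reslen = &reslen;

	        return ioctl(fd, I2OPARMSET, &ops);	/* -1 on error, errno as above */
	    }
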
154VII. Getting Parameters
155
156 SYNOPSIS
157
158 ioctl(fd, I2OPARMGET, struct i2o_parm_setget *ops);
159
160 struct i2o_parm_setget
161 {
162 u32 iop; /* IOP unit number */
163 u32 tid; /* Target device TID */
164 void *opbuf; /* Operation List buffer */
165 u32 oplen; /* Operation List buffer length in bytes */
166 void *resbuf; /* Result List buffer */
167 u32 *reslen; /* Result List buffer length in bytes */
168 };
169
170 DESCRIPTION
171
172 This function posts a UtilParamsGet message to the device identified
173 by ops->iop and ops->tid. The operation list for the message is
174 sent through the ops->opbuf buffer, and the result list is written
175 into the buffer pointed to by ops->resbuf. The actual size of data
176 written is placed into *(ops->reslen).
177
178 RETURNS
179
180 EFAULT Invalid user space pointer was passed
181 ENXIO Invalid IOP number
182 ENOBUFS Buffer not large enough. If this occurs, the required
183 buffer length is written into *(ops->reslen)
184 ETIMEDOUT Timeout waiting for reply message
185 ENOMEM Kernel memory allocation error
186
187 A return value of 0 does not mean that the value was actually
 188	properly retrieved. The user should check the result list
189 to determine the specific status of the transaction.
190
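 EXAMPLE

	Reading parameters uses the same structure and calling convention as
	the params_set() sketch above (same includes); only the request code
	and the direction of the result differ.

	    /* Post a UtilParamsGet to device 'tid' on IOP 'unit'.  The result
	     * list lands in 'result'; its size in bytes is written to *got. */
	    static int params_get(int fd, unsigned int unit, unsigned int tid,
	                          void *oplist, unsigned int oplen,
	                          void *result, unsigned int resmax,
	                          unsigned int *got)
	    {
	        struct i2o_cmd_psetget ops;

	        ops.iop = unit;
	        ops.tid = tid;
	        ops.opbuf = oplist;
	        ops.oplen = oplen;
	        ops.resbuf = result;
	        *got = resmax;
	        ops.reslen = got;

	        return ioctl(fd, I2OPARMGET, &ops);
	    }
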
191VIII. Downloading Software
192
193 SYNOPSIS
194
195 ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw);
196
197 struct i2o_sw_xfer
198 {
199 u32 iop; /* IOP unit number */
200 u8 flags; /* DownloadFlags field */
201 u8 sw_type; /* Software type */
202 u32 sw_id; /* Software ID */
203 void *buf; /* Pointer to software buffer */
204 u32 *swlen; /* Length of software buffer */
205 u32 *maxfrag; /* Number of fragments */
206 u32 *curfrag; /* Current fragment number */
207 };
208
209 DESCRIPTION
210
 211	This function downloads a software fragment pointed to by sw->buf
 212	to the IOP identified by sw->iop. The DownloadFlags, SwID, SwType
213 and SwSize fields of the ExecSwDownload message are filled in with
214 the values of sw->flags, sw->sw_id, sw->sw_type and *(sw->swlen).
215
216 The fragments _must_ be sent in order and be 8K in size. The last
217 fragment _may_ be shorter, however. The kernel will compute its
218 size based on information in the sw->swlen field.
219
220 Please note that SW transfers can take a long time.
221
222 RETURNS
223
 224	This function returns 0 if no errors occur. If an error occurs, -1
 225	is returned and errno is set appropriately:
226
227 EFAULT Invalid user space pointer was passed
228 ENXIO Invalid IOP number
229 ETIMEDOUT Timeout waiting for reply message
230 ENOMEM Kernel memory allocation error
231
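 EXAMPLE

	A sketch of the download loop implied by the text: the image is cut
	into 8K fragments and posted in order, with the last fragment allowed
	to be shorter. The flags/type/ID values are firmware-specific inputs,
	and the 0-based fragment numbering is an assumption of this sketch.
	The upload path (I2OSWUL, next section) drives the same structure in
	the opposite direction.

	    #include <sys/ioctl.h>
	    #include <linux/i2o-dev.h>	/* I2OSWDL, struct i2o_sw_xfer (assumed) */

	    #define FRAG_SIZE 8192	/* fragments must be 8K, per the text above */

	    static int sw_download(int fd, unsigned int unit, unsigned char flags,
	                           unsigned char sw_type, unsigned int sw_id,
	                           unsigned char *image, unsigned int image_len)
	    {
	        struct i2o_sw_xfer sw;
	        unsigned int swlen = image_len;	/* total size; the kernel derives
	                                         * the last fragment's size */
	        unsigned int maxfrag = (image_len + FRAG_SIZE - 1) / FRAG_SIZE;
	        unsigned int curfrag;

	        sw.iop = unit;
	        sw.flags = flags;
	        sw.sw_type = sw_type;
	        sw.sw_id = sw_id;
	        sw.swlen = &swlen;
	        sw.maxfrag = &maxfrag;
	        sw.curfrag = &curfrag;

	        for (curfrag = 0; curfrag < maxfrag; curfrag++) {
	            sw.buf = image + curfrag * FRAG_SIZE;
	            if (ioctl(fd, I2OSWDL, &sw) < 0)
	                return -1;	/* errno set as documented above */
	        }
	        return 0;
	    }
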
232IX. Uploading Software
233
234 SYNOPSIS
235
236 ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw);
237
238 struct i2o_sw_xfer
239 {
240 u32 iop; /* IOP unit number */
241 u8 flags; /* UploadFlags */
242 u8 sw_type; /* Software type */
243 u32 sw_id; /* Software ID */
244 void *buf; /* Pointer to software buffer */
245 u32 *swlen; /* Length of software buffer */
246 u32 *maxfrag; /* Number of fragments */
247 u32 *curfrag; /* Current fragment number */
248 };
249
250 DESCRIPTION
251
252 This function uploads a software fragment from the IOP identified
253 by sw->iop, sw->sw_type, sw->sw_id and optionally sw->swlen fields.
254 The UploadFlags, SwID, SwType and SwSize fields of the ExecSwUpload
255 message are filled in with the values of sw->flags, sw->sw_id,
256 sw->sw_type and *(sw->swlen).
257
258 The fragments _must_ be requested in order and be 8K in size. The
259 user is responsible for allocating memory pointed by sw->buf. The
260 last fragment _may_ be shorter.
261
262 Please note that SW transfers can take a long time.
263
264 RETURNS
265
266 This function returns 0 if no errors occur. If an error occurs, -1
 267	is returned and errno is set appropriately:
268
269 EFAULT Invalid user space pointer was passed
270 ENXIO Invalid IOP number
271 ETIMEDOUT Timeout waiting for reply message
272 ENOMEM Kernel memory allocation error
273
274X. Removing Software
275
276 SYNOPSIS
277
278 ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw);
279
280 struct i2o_sw_xfer
281 {
282 u32 iop; /* IOP unit number */
283 u8 flags; /* RemoveFlags */
284 u8 sw_type; /* Software type */
285 u32 sw_id; /* Software ID */
286 void *buf; /* Unused */
287 u32 *swlen; /* Length of the software data */
288 u32 *maxfrag; /* Unused */
289 u32 *curfrag; /* Unused */
290 };
291
292 DESCRIPTION
293
294 This function removes software from the IOP identified by sw->iop.
295 The RemoveFlags, SwID, SwType and SwSize fields of the ExecSwRemove message
296 are filled in with the values of sw->flags, sw->sw_id, sw->sw_type and
297 *(sw->swlen). Give zero in *(sw->len) if the value is unknown. IOP uses
298 *(sw->swlen) value to verify correct identication of the module to remove.
299 The actual size of the module is written into *(sw->swlen).
300
301 RETURNS
302
303 This function returns 0 if no errors occur. If an error occurs, -1
 304	is returned and errno is set appropriately:
305
306 EFAULT Invalid user space pointer was passed
307 ENXIO Invalid IOP number
308 ETIMEDOUT Timeout waiting for reply message
309 ENOMEM Kernel memory allocation error
310
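 EXAMPLE

	Removal reuses the i2o_sw_xfer structure with buf unused. A minimal
	sketch, passing zero in *(sw->swlen) for an unknown module size as the
	text allows; leaving the unused pointers NULL and the helper name are
	assumptions of this sketch.

	    #include <string.h>
	    #include <sys/ioctl.h>
	    #include <linux/i2o-dev.h>	/* I2OSWDEL, struct i2o_sw_xfer (assumed) */

	    /* Remove software module (sw_type, sw_id) from IOP 'unit'. */
	    static int sw_remove(int fd, unsigned int unit, unsigned char flags,
	                         unsigned char sw_type, unsigned int sw_id)
	    {
	        struct i2o_sw_xfer sw;
	        unsigned int swlen = 0;	/* 0 = unknown; actual size written back */

	        memset(&sw, 0, sizeof(sw));	/* buf/maxfrag/curfrag are unused */
	        sw.iop = unit;
	        sw.flags = flags;
	        sw.sw_type = sw_type;
	        sw.sw_id = sw_id;
	        sw.swlen = &swlen;

	        return ioctl(fd, I2OSWDEL, &sw);
	    }
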
 311XI. Validating Configuration
312
313 SYNOPSIS
314
315 ioctl(fd, I2OVALIDATE, int *iop);
316 u32 iop;
317
318 DESCRIPTION
319
320 This function posts an ExecConfigValidate message to the controller
321 identified by iop. This message indicates that the current
322 configuration is accepted. The iop changes the status of suspect drivers
323 to valid and may delete old drivers from its store.
324
325 RETURNS
326
 327	This function returns 0 if no errors occur. If an error occurs, -1 is
 328	returned and errno is set appropriately:
329
330 ETIMEDOUT Timeout waiting for reply message
331 ENXIO Invalid IOP number
332
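 EXAMPLE

	The validate call is the simplest of the set; a minimal sketch, with
	fd open on /dev/i2o/ctl as in the earlier examples.

	    /* Tell IOP 'unit' that the current configuration is accepted. */
	    static int validate_config(int fd, int unit)
	    {
	        return ioctl(fd, I2OVALIDATE, &unit);	/* -1 with ENXIO/ETIMEDOUT on error */
	    }
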
 333XII. Configuration Dialog
334
335 SYNOPSIS
336
337 ioctl(fd, I2OHTML, struct i2o_html *htquery);
338 struct i2o_html
339 {
340 u32 iop; /* IOP unit number */
341 u32 tid; /* Target device ID */
342 u32 page; /* HTML page */
343 void *resbuf; /* Buffer for reply HTML page */
344 u32 *reslen; /* Length in bytes of reply buffer */
345 void *qbuf; /* Pointer to HTTP query string */
346 u32 qlen; /* Length in bytes of query string buffer */
347 };
348
349 DESCRIPTION
350
 351	This function posts a UtilConfigDialog message to the device identified
352 by htquery->iop and htquery->tid. The requested HTML page number is
353 provided by the htquery->page field, and the resultant data is stored
354 in the buffer pointed to by htquery->resbuf. If there is an HTTP query
355 string that is to be sent to the device, it should be sent in the buffer
356 pointed to by htquery->qbuf. If there is no query string, this field
357 should be set to NULL. The actual size of the reply received is written
358 into *(htquery->reslen).
359
360 RETURNS
361
 362	This function returns 0 if no errors occur. If an error occurs, -1
 363	is returned and errno is set appropriately:
364
365 EFAULT Invalid user space pointer was passed
366 ENXIO Invalid IOP number
367 ENOBUFS Buffer not large enough. If this occurs, the required
 368		buffer length is written into *(htquery->reslen)
369 ETIMEDOUT Timeout waiting for reply message
370 ENOMEM Kernel memory allocation error
371
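 EXAMPLE

	A sketch of requesting one HTML configuration page. The query string is
	optional and passed as NULL when absent, exactly as described above;
	the helper name and header path are assumptions.

	    #include <string.h>
	    #include <sys/ioctl.h>
	    #include <linux/i2o-dev.h>	/* I2OHTML, struct i2o_html (assumed) */

	    /* Fetch HTML page 'page' from device 'tid' on IOP 'unit' into buf.
	     * Returns the reply size in bytes, or 0 on error. */
	    static unsigned int get_html_page(int fd, unsigned int unit,
	                                      unsigned int tid, unsigned int page,
	                                      char *query, void *buf,
	                                      unsigned int buflen)
	    {
	        struct i2o_html ht;
	        unsigned int reslen = buflen;

	        ht.iop = unit;
	        ht.tid = tid;
	        ht.page = page;
	        ht.resbuf = buf;
	        ht.reslen = &reslen;
	        ht.qbuf = query;		/* NULL if there is no query string */
	        ht.qlen = query ? strlen(query) : 0;

	        if (ioctl(fd, I2OHTML, &ht) < 0)
	            return 0;
	        return reslen;		/* actual size of the reply page */
	    }
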
 372XIII. Events
373
 374	This interface is still being worked out. The current idea is to use
375 the select() interface to allow user apps to periodically poll
376 the /dev/i2o/ctl device for events. When select() notifies the user
377 that an event is available, the user would call read() to retrieve
378 a list of all the events that are pending for the specific device.
379
380=============================================================================
381Revision History
382=============================================================================
383
384Rev 0.1 - 04/01/99
385- Initial revision
386
387Rev 0.2 - 04/06/99
388- Changed return values to match UNIX ioctl() standard. Only return values
389 are 0 and -1. All errors are reported through errno.
390- Added summary of proposed possible event interfaces
391
392Rev 0.3 - 04/20/99
393- Changed all ioctls() to use pointers to user data instead of actual data
394- Updated error values to match the code
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
new file mode 100644
index 000000000000..2a5d478fc60e
--- /dev/null
+++ b/drivers/message/i2o/debug.c
@@ -0,0 +1,481 @@
1#include <linux/config.h>
2#include <linux/module.h>
3#include <linux/kernel.h>
4#include <linux/pci.h>
5#include <linux/i2o.h>
6
7extern struct i2o_driver **i2o_drivers;
8extern unsigned int i2o_max_drivers;
9static void i2o_report_util_cmd(u8 cmd);
10static void i2o_report_exec_cmd(u8 cmd);
11static void i2o_report_fail_status(u8 req_status, u32 * msg);
12static void i2o_report_common_status(u8 req_status);
13static void i2o_report_common_dsc(u16 detailed_status);
14
15/*
16 * Used for error reporting/debugging purposes.
17 * Report Cmd name, Request status, Detailed Status.
18 */
19void i2o_report_status(const char *severity, const char *str,
20 struct i2o_message *m)
21{
22 u32 *msg = (u32 *) m;
23 u8 cmd = (msg[1] >> 24) & 0xFF;
24 u8 req_status = (msg[4] >> 24) & 0xFF;
25 u16 detailed_status = msg[4] & 0xFFFF;
26 //struct i2o_driver *h = i2o_drivers[msg[2] & (i2o_max_drivers-1)];
27
28 if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
29 return; // No status in this reply
30
31 printk(KERN_DEBUG "%s%s: ", severity, str);
32
33 if (cmd < 0x1F) // Utility cmd
34 i2o_report_util_cmd(cmd);
35
36 else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
37 i2o_report_exec_cmd(cmd);
38 else
39 printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); // Other cmds
40
41 if (msg[0] & MSG_FAIL) {
42 i2o_report_fail_status(req_status, msg);
43 return;
44 }
45
46 i2o_report_common_status(req_status);
47
48 if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
49 i2o_report_common_dsc(detailed_status);
50 else
51 printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
52 detailed_status);
53}
54
55/* Used to dump a message to syslog during debugging */
56void i2o_dump_message(struct i2o_message *m)
57{
58#ifdef DEBUG
59 u32 *msg = (u32 *) m;
60 int i;
61 printk(KERN_INFO "Dumping I2O message size %d @ %p\n",
62 msg[0] >> 16 & 0xffff, msg);
63 for (i = 0; i < ((msg[0] >> 16) & 0xffff); i++)
64 printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]);
65#endif
66}
67
68/*
69 * Used for error reporting/debugging purposes.
  70 * The following fail statuses are common to all classes.
71 * The preserved message must be handled in the reply handler.
72 */
73static void i2o_report_fail_status(u8 req_status, u32 * msg)
74{
75 static char *FAIL_STATUS[] = {
76 "0x80", /* not used */
77 "SERVICE_SUSPENDED", /* 0x81 */
78 "SERVICE_TERMINATED", /* 0x82 */
79 "CONGESTION",
80 "FAILURE",
81 "STATE_ERROR",
82 "TIME_OUT",
83 "ROUTING_FAILURE",
84 "INVALID_VERSION",
85 "INVALID_OFFSET",
86 "INVALID_MSG_FLAGS",
87 "FRAME_TOO_SMALL",
88 "FRAME_TOO_LARGE",
89 "INVALID_TARGET_ID",
90 "INVALID_INITIATOR_ID",
91 "INVALID_INITIATOR_CONTEX", /* 0x8F */
92 "UNKNOWN_FAILURE" /* 0xFF */
93 };
94
95 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
96 printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.",
97 req_status);
98 else
99 printk(KERN_DEBUG "TRANSPORT_%s.\n",
100 FAIL_STATUS[req_status & 0x0F]);
101
102 /* Dump some details */
103
104 printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n",
105 (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
106 printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
107 (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
108 printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
109 msg[5] >> 16, msg[5] & 0xFFF);
110
111 printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF);
112 if (msg[4] & (1 << 16))
113 printk(KERN_DEBUG "(FormatError), "
114 "this msg can never be delivered/processed.\n");
115 if (msg[4] & (1 << 17))
116 printk(KERN_DEBUG "(PathError), "
117 "this msg can no longer be delivered/processed.\n");
118 if (msg[4] & (1 << 18))
119 printk(KERN_DEBUG "(PathState), "
120 "the system state does not allow delivery.\n");
121 if (msg[4] & (1 << 19))
122 printk(KERN_DEBUG
123 "(Congestion), resources temporarily not available;"
124 "do not retry immediately.\n");
125}
126
127/*
128 * Used for error reporting/debugging purposes.
 129 * The following reply statuses are common to all classes.
130 */
131static void i2o_report_common_status(u8 req_status)
132{
133 static char *REPLY_STATUS[] = {
134 "SUCCESS",
135 "ABORT_DIRTY",
136 "ABORT_NO_DATA_TRANSFER",
137 "ABORT_PARTIAL_TRANSFER",
138 "ERROR_DIRTY",
139 "ERROR_NO_DATA_TRANSFER",
140 "ERROR_PARTIAL_TRANSFER",
141 "PROCESS_ABORT_DIRTY",
142 "PROCESS_ABORT_NO_DATA_TRANSFER",
143 "PROCESS_ABORT_PARTIAL_TRANSFER",
144 "TRANSACTION_ERROR",
145 "PROGRESS_REPORT"
146 };
147
148 if (req_status >= ARRAY_SIZE(REPLY_STATUS))
149 printk(KERN_DEBUG "RequestStatus = %0#2x", req_status);
150 else
151 printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]);
152}
153
154/*
155 * Used for error reporting/debugging purposes.
 156 * The following detailed statuses are valid for the executive class,
 157 * utility class, DDM class and for transaction error replies.
158 */
159static void i2o_report_common_dsc(u16 detailed_status)
160{
161 static char *COMMON_DSC[] = {
162 "SUCCESS",
163 "0x01", // not used
164 "BAD_KEY",
165 "TCL_ERROR",
166 "REPLY_BUFFER_FULL",
167 "NO_SUCH_PAGE",
168 "INSUFFICIENT_RESOURCE_SOFT",
169 "INSUFFICIENT_RESOURCE_HARD",
170 "0x08", // not used
171 "CHAIN_BUFFER_TOO_LARGE",
172 "UNSUPPORTED_FUNCTION",
173 "DEVICE_LOCKED",
174 "DEVICE_RESET",
175 "INAPPROPRIATE_FUNCTION",
176 "INVALID_INITIATOR_ADDRESS",
177 "INVALID_MESSAGE_FLAGS",
178 "INVALID_OFFSET",
179 "INVALID_PARAMETER",
180 "INVALID_REQUEST",
181 "INVALID_TARGET_ADDRESS",
182 "MESSAGE_TOO_LARGE",
183 "MESSAGE_TOO_SMALL",
184 "MISSING_PARAMETER",
185 "TIMEOUT",
186 "UNKNOWN_ERROR",
187 "UNKNOWN_FUNCTION",
188 "UNSUPPORTED_VERSION",
189 "DEVICE_BUSY",
190 "DEVICE_NOT_AVAILABLE"
191 };
192
193 if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
194 printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
195 detailed_status);
196 else
197 printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]);
198}
199
200/*
201 * Used for error reporting/debugging purposes
202 */
203static void i2o_report_util_cmd(u8 cmd)
204{
205 switch (cmd) {
206 case I2O_CMD_UTIL_NOP:
207 printk(KERN_DEBUG "UTIL_NOP, ");
208 break;
209 case I2O_CMD_UTIL_ABORT:
210 printk(KERN_DEBUG "UTIL_ABORT, ");
211 break;
212 case I2O_CMD_UTIL_CLAIM:
213 printk(KERN_DEBUG "UTIL_CLAIM, ");
214 break;
215 case I2O_CMD_UTIL_RELEASE:
216 printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, ");
217 break;
218 case I2O_CMD_UTIL_CONFIG_DIALOG:
219 printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, ");
220 break;
221 case I2O_CMD_UTIL_DEVICE_RESERVE:
222 printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, ");
223 break;
224 case I2O_CMD_UTIL_DEVICE_RELEASE:
225 printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, ");
226 break;
227 case I2O_CMD_UTIL_EVT_ACK:
228 printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, ");
229 break;
230 case I2O_CMD_UTIL_EVT_REGISTER:
231 printk(KERN_DEBUG "UTIL_EVENT_REGISTER, ");
232 break;
233 case I2O_CMD_UTIL_LOCK:
234 printk(KERN_DEBUG "UTIL_LOCK, ");
235 break;
236 case I2O_CMD_UTIL_LOCK_RELEASE:
237 printk(KERN_DEBUG "UTIL_LOCK_RELEASE, ");
238 break;
239 case I2O_CMD_UTIL_PARAMS_GET:
240 printk(KERN_DEBUG "UTIL_PARAMS_GET, ");
241 break;
242 case I2O_CMD_UTIL_PARAMS_SET:
243 printk(KERN_DEBUG "UTIL_PARAMS_SET, ");
244 break;
245 case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
246 printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, ");
247 break;
248 default:
249 printk(KERN_DEBUG "Cmd = %0#2x, ", cmd);
250 }
251}
252
253/*
254 * Used for error reporting/debugging purposes
255 */
256static void i2o_report_exec_cmd(u8 cmd)
257{
258 switch (cmd) {
259 case I2O_CMD_ADAPTER_ASSIGN:
260 printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, ");
261 break;
262 case I2O_CMD_ADAPTER_READ:
263 printk(KERN_DEBUG "EXEC_ADAPTER_READ, ");
264 break;
265 case I2O_CMD_ADAPTER_RELEASE:
266 printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, ");
267 break;
268 case I2O_CMD_BIOS_INFO_SET:
269 printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, ");
270 break;
271 case I2O_CMD_BOOT_DEVICE_SET:
272 printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, ");
273 break;
274 case I2O_CMD_CONFIG_VALIDATE:
275 printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, ");
276 break;
277 case I2O_CMD_CONN_SETUP:
278 printk(KERN_DEBUG "EXEC_CONN_SETUP, ");
279 break;
280 case I2O_CMD_DDM_DESTROY:
281 printk(KERN_DEBUG "EXEC_DDM_DESTROY, ");
282 break;
283 case I2O_CMD_DDM_ENABLE:
284 printk(KERN_DEBUG "EXEC_DDM_ENABLE, ");
285 break;
286 case I2O_CMD_DDM_QUIESCE:
287 printk(KERN_DEBUG "EXEC_DDM_QUIESCE, ");
288 break;
289 case I2O_CMD_DDM_RESET:
290 printk(KERN_DEBUG "EXEC_DDM_RESET, ");
291 break;
292 case I2O_CMD_DDM_SUSPEND:
293 printk(KERN_DEBUG "EXEC_DDM_SUSPEND, ");
294 break;
295 case I2O_CMD_DEVICE_ASSIGN:
296 printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, ");
297 break;
298 case I2O_CMD_DEVICE_RELEASE:
299 printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, ");
300 break;
301 case I2O_CMD_HRT_GET:
302 printk(KERN_DEBUG "EXEC_HRT_GET, ");
303 break;
304 case I2O_CMD_ADAPTER_CLEAR:
305 printk(KERN_DEBUG "EXEC_IOP_CLEAR, ");
306 break;
307 case I2O_CMD_ADAPTER_CONNECT:
308 printk(KERN_DEBUG "EXEC_IOP_CONNECT, ");
309 break;
310 case I2O_CMD_ADAPTER_RESET:
311 printk(KERN_DEBUG "EXEC_IOP_RESET, ");
312 break;
313 case I2O_CMD_LCT_NOTIFY:
314 printk(KERN_DEBUG "EXEC_LCT_NOTIFY, ");
315 break;
316 case I2O_CMD_OUTBOUND_INIT:
317 printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, ");
318 break;
319 case I2O_CMD_PATH_ENABLE:
320 printk(KERN_DEBUG "EXEC_PATH_ENABLE, ");
321 break;
322 case I2O_CMD_PATH_QUIESCE:
323 printk(KERN_DEBUG "EXEC_PATH_QUIESCE, ");
324 break;
325 case I2O_CMD_PATH_RESET:
326 printk(KERN_DEBUG "EXEC_PATH_RESET, ");
327 break;
328 case I2O_CMD_STATIC_MF_CREATE:
329 printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, ");
330 break;
331 case I2O_CMD_STATIC_MF_RELEASE:
332 printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, ");
333 break;
334 case I2O_CMD_STATUS_GET:
335 printk(KERN_DEBUG "EXEC_STATUS_GET, ");
336 break;
337 case I2O_CMD_SW_DOWNLOAD:
338 printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, ");
339 break;
340 case I2O_CMD_SW_UPLOAD:
341 printk(KERN_DEBUG "EXEC_SW_UPLOAD, ");
342 break;
343 case I2O_CMD_SW_REMOVE:
344 printk(KERN_DEBUG "EXEC_SW_REMOVE, ");
345 break;
346 case I2O_CMD_SYS_ENABLE:
347 printk(KERN_DEBUG "EXEC_SYS_ENABLE, ");
348 break;
349 case I2O_CMD_SYS_MODIFY:
350 printk(KERN_DEBUG "EXEC_SYS_MODIFY, ");
351 break;
352 case I2O_CMD_SYS_QUIESCE:
353 printk(KERN_DEBUG "EXEC_SYS_QUIESCE, ");
354 break;
355 case I2O_CMD_SYS_TAB_SET:
356 printk(KERN_DEBUG "EXEC_SYS_TAB_SET, ");
357 break;
358 default:
359 printk(KERN_DEBUG "Cmd = %#02x, ", cmd);
360 }
361}
362
363void i2o_debug_state(struct i2o_controller *c)
364{
365 printk(KERN_INFO "%s: State = ", c->name);
366 switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
367 case 0x01:
368 printk(KERN_DEBUG "INIT\n");
369 break;
370 case 0x02:
371 printk(KERN_DEBUG "RESET\n");
372 break;
373 case 0x04:
374 printk(KERN_DEBUG "HOLD\n");
375 break;
376 case 0x05:
377 printk(KERN_DEBUG "READY\n");
378 break;
379 case 0x08:
380 printk(KERN_DEBUG "OPERATIONAL\n");
381 break;
382 case 0x10:
383 printk(KERN_DEBUG "FAILED\n");
384 break;
385 case 0x11:
386 printk(KERN_DEBUG "FAULTED\n");
387 break;
388 default:
389 printk(KERN_DEBUG "%x (unknown !!)\n",
390 ((i2o_status_block *) c->status_block.virt)->iop_state);
391 }
392};
393
394void i2o_dump_hrt(struct i2o_controller *c)
395{
396 u32 *rows = (u32 *) c->hrt.virt;
397 u8 *p = (u8 *) c->hrt.virt;
398 u8 *d;
399 int count;
400 int length;
401 int i;
402 int state;
403
404 if (p[3] != 0) {
405 printk(KERN_ERR
406 "%s: HRT table for controller is too new a version.\n",
407 c->name);
408 return;
409 }
410
411 count = p[0] | (p[1] << 8);
412 length = p[2];
413
414 printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
415 c->name, count, length << 2);
416
417 rows += 2;
418
419 for (i = 0; i < count; i++) {
420 printk(KERN_INFO "Adapter %08X: ", rows[0]);
421 p = (u8 *) (rows + 1);
422 d = (u8 *) (rows + 2);
423 state = p[1] << 8 | p[0];
424
425 printk(KERN_DEBUG "TID %04X:[", state & 0xFFF);
426 state >>= 12;
427 if (state & (1 << 0))
428 printk(KERN_DEBUG "H"); /* Hidden */
429 if (state & (1 << 2)) {
430 printk(KERN_DEBUG "P"); /* Present */
431 if (state & (1 << 1))
432 printk(KERN_DEBUG "C"); /* Controlled */
433 }
434 if (state > 9)
435 printk(KERN_DEBUG "*"); /* Hard */
436
437 printk(KERN_DEBUG "]:");
438
439 switch (p[3] & 0xFFFF) {
440 case 0:
441 /* Adapter private bus - easy */
442 printk(KERN_DEBUG
443 "Local bus %d: I/O at 0x%04X Mem 0x%08X", p[2],
444 d[1] << 8 | d[0], *(u32 *) (d + 4));
445 break;
446 case 1:
447 /* ISA bus */
448 printk(KERN_DEBUG
449 "ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", p[2],
450 d[2], d[1] << 8 | d[0], *(u32 *) (d + 4));
451 break;
452
453 case 2: /* EISA bus */
454 printk(KERN_DEBUG
455 "EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
456 p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
457 break;
458
459 case 3: /* MCA bus */
460 printk(KERN_DEBUG
461 "MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2],
462 d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
463 break;
464
465 case 4: /* PCI bus */
466 printk(KERN_DEBUG
467 "PCI %d: Bus %d Device %d Function %d", p[2],
468 d[2], d[1], d[0]);
469 break;
470
471 case 0x80: /* Other */
472 default:
473 printk(KERN_DEBUG "Unsupported bus type.");
474 break;
475 }
476 printk(KERN_DEBUG "\n");
477 rows += length;
478 }
479}
480
481EXPORT_SYMBOL(i2o_dump_message);
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
new file mode 100644
index 000000000000..eb907e87bc7b
--- /dev/null
+++ b/drivers/message/i2o/device.c
@@ -0,0 +1,634 @@
1/*
2 * Functions to handle I2O devices
3 *
4 * Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Fixes/additions:
12 * Markus Lidel <Markus.Lidel@shadowconnect.com>
13 * initial version.
14 */
15
16#include <linux/module.h>
17#include <linux/i2o.h>
18#include <linux/delay.h>
19
20/* Exec OSM functions */
21extern struct bus_type i2o_bus_type;
22
23/**
24 * i2o_device_issue_claim - claim or release a device
25 * @dev: I2O device to claim or release
26 * @cmd: claim or release command
27 * @type: type of claim
28 *
29 * Issue I2O UTIL_CLAIM or UTIL_RELEASE messages. The message to be sent
  30 *	is set by cmd. dev is the I2O device which should be claimed or
31 * released and the type is the claim type (see the I2O spec).
32 *
  33 *	Returns 0 on success or negative error code on failure.
34 */
35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
36 u32 type)
37{
38 struct i2o_message __iomem *msg;
39 u32 m;
40
41 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
42 if (m == I2O_QUEUE_EMPTY)
43 return -ETIMEDOUT;
44
45 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
46 writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]);
47 writel(type, &msg->body[0]);
48
49 return i2o_msg_post_wait(dev->iop, m, 60);
50};
51
52/**
53 * i2o_device_claim - claim a device for use by an OSM
54 * @dev: I2O device to claim
55 * @drv: I2O driver which wants to claim the device
56 *
  57 *	Do the leg work to assign a device to a given OSM. If the claim succeeds,
  58 *	the OSM becomes the primary owner. If the attempt fails a negative errno code
59 * is returned. On success zero is returned.
60 */
61int i2o_device_claim(struct i2o_device *dev)
62{
63 int rc = 0;
64
65 down(&dev->lock);
66
67 rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY);
68 if (!rc)
  69		pr_debug("i2o: claim of device %d succeeded\n",
70 dev->lct_data.tid);
71 else
72 pr_debug("i2o: claim of device %d failed %d\n",
73 dev->lct_data.tid, rc);
74
75 up(&dev->lock);
76
77 return rc;
78};
79
80/**
81 * i2o_device_claim_release - release a device that the OSM is using
82 * @dev: device to release
83 * @drv: driver which claimed the device
84 *
85 * Drop a claim by an OSM on a given I2O device.
86 *
87 * AC - some devices seem to want to refuse an unclaim until they have
88 * finished internal processing. It makes sense since you don't want a
89 * new device to go reconfiguring the entire system until you are done.
90 * Thus we are prepared to wait briefly.
91 *
92 * Returns 0 on success or negative error code on failure.
93 */
94int i2o_device_claim_release(struct i2o_device *dev)
95{
96 int tries;
97 int rc = 0;
98
99 down(&dev->lock);
100
101 /*
102 * If the controller takes a nonblocking approach to
103 * releases we have to sleep/poll for a few times.
104 */
105 for (tries = 0; tries < 10; tries++) {
106 rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_RELEASE,
107 I2O_CLAIM_PRIMARY);
108 if (!rc)
109 break;
110
111 ssleep(1);
112 }
113
114 if (!rc)
 115		pr_debug("i2o: claim release of device %d succeeded\n",
116 dev->lct_data.tid);
117 else
118 pr_debug("i2o: claim release of device %d failed %d\n",
119 dev->lct_data.tid, rc);
120
121 up(&dev->lock);
122
123 return rc;
124};
125
126/**
 127 *	i2o_device_release - release the memory for an I2O device
128 * @dev: I2O device which should be released
129 *
 130 *	Release the allocated memory. This function is called automatically
 131 *	when the refcount of the device reaches 0.
132 */
133static void i2o_device_release(struct device *dev)
134{
135 struct i2o_device *i2o_dev = to_i2o_device(dev);
136
137 pr_debug("i2o: device %s released\n", dev->bus_id);
138
139 kfree(i2o_dev);
140};
141
142/**
143 * i2o_device_class_release - Remove I2O device attributes
144 * @cd: I2O class device which is added to the I2O device class
145 *
146 * Removes the attributes from the I2O device. Also searches each device
147 * on the controller for I2O devices which refer to this device as parent
148 * or user and removes these links as well.
149 */
150static void i2o_device_class_release(struct class_device *cd)
151{
152 struct i2o_device *i2o_dev, *tmp;
153 struct i2o_controller *c;
154
155 i2o_dev = to_i2o_device(cd->dev);
156 c = i2o_dev->iop;
157
158 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
159 sysfs_remove_link(&i2o_dev->device.kobj, "user");
160
161 list_for_each_entry(tmp, &c->devices, list) {
162 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
163 sysfs_remove_link(&tmp->device.kobj, "parent");
164 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
165 sysfs_remove_link(&tmp->device.kobj, "user");
166 }
167};
168
169/* I2O device class */
170static struct class i2o_device_class = {
171 .name = "i2o_device",
172 .release = i2o_device_class_release
173};
174
175/**
176 * i2o_device_alloc - Allocate an I2O device and initialize it
177 *
178 * Allocate the memory for an I2O device and initialize locks and lists.
179 *
180 * Returns the allocated I2O device or a negative error code if the device
181 * could not be allocated.
182 */
183static struct i2o_device *i2o_device_alloc(void)
184{
185 struct i2o_device *dev;
186
187 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
188 if (!dev)
189 return ERR_PTR(-ENOMEM);
190
191 memset(dev, 0, sizeof(*dev));
192
193 INIT_LIST_HEAD(&dev->list);
194 init_MUTEX(&dev->lock);
195
196 dev->device.bus = &i2o_bus_type;
197 dev->device.release = &i2o_device_release;
198 dev->classdev.class = &i2o_device_class;
199 dev->classdev.dev = &dev->device;
200
201 return dev;
202};
203
204/**
205 * i2o_device_add - allocate a new I2O device and add it to the IOP
206 * @c: I2O controller the device resides on
207 * @entry: LCT entry of the I2O device
208 *
209 * Allocate a new I2O device and initialize it with the LCT entry. The
210 * device is appended to the device list of the controller.
211 *
212 * Returns a pointer to the I2O device on success or negative error code
213 * on failure.
214 */
215static struct i2o_device *i2o_device_add(struct i2o_controller *c,
216 i2o_lct_entry * entry)
217{
218 struct i2o_device *dev;
219
220 dev = i2o_device_alloc();
221 if (IS_ERR(dev)) {
222 printk(KERN_ERR "i2o: unable to allocate i2o device\n");
223 return dev;
224 }
225
226 dev->lct_data = *entry;
227
228 snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
229 dev->lct_data.tid);
230
231 snprintf(dev->classdev.class_id, BUS_ID_SIZE, "%d:%03x", c->unit,
232 dev->lct_data.tid);
233
234 dev->iop = c;
235 dev->device.parent = &c->device;
236
237 device_register(&dev->device);
238
239 list_add_tail(&dev->list, &c->devices);
240
241 class_device_register(&dev->classdev);
242
243 i2o_driver_notify_device_add_all(dev);
244
245 pr_debug("i2o: device %s added\n", dev->device.bus_id);
246
247 return dev;
248};
249
250/**
251 * i2o_device_remove - remove an I2O device from the I2O core
252 * @dev: I2O device which should be released
253 *
254 * Is used on I2O controller removal or LCT modification, when the device
255 * is removed from the system. Note that the device could still hang
256 * around until the refcount reaches 0.
257 */
258void i2o_device_remove(struct i2o_device *i2o_dev)
259{
260 i2o_driver_notify_device_remove_all(i2o_dev);
261 class_device_unregister(&i2o_dev->classdev);
262 list_del(&i2o_dev->list);
263 device_unregister(&i2o_dev->device);
264};
265
266/**
267 * i2o_device_parse_lct - Parse a previously fetched LCT and create devices
268 * @c: I2O controller from which the LCT should be parsed.
269 *
270 * The Logical Configuration Table tells us what we can talk to on the
271 * board. For every entry we create an I2O device, which is registered in
272 * the I2O core.
273 *
274 * Returns 0 on success or negative error code on failure.
275 */
276int i2o_device_parse_lct(struct i2o_controller *c)
277{
278 struct i2o_device *dev, *tmp;
279 i2o_lct *lct;
280 int i;
281 int max;
282
283 down(&c->lct_lock);
284
285 if (c->lct)
286 kfree(c->lct);
287
288 lct = c->dlct.virt;
289
290 c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL);
291 if (!c->lct) {
292 up(&c->lct_lock);
293 return -ENOMEM;
294 }
295
296 if (lct->table_size * 4 > c->dlct.len) {
297 memcpy_fromio(c->lct, c->dlct.virt, c->dlct.len);
298 up(&c->lct_lock);
299 return -EAGAIN;
300 }
301
302 memcpy_fromio(c->lct, c->dlct.virt, lct->table_size * 4);
303
304 lct = c->lct;
305
306 max = (lct->table_size - 3) / 9;
307
308 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
309 lct->table_size);
310
311 /* remove devices, which are not in the LCT anymore */
312 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
313 int found = 0;
314
315 for (i = 0; i < max; i++) {
316 if (lct->lct_entry[i].tid == dev->lct_data.tid) {
317 found = 1;
318 break;
319 }
320 }
321
322 if (!found)
323 i2o_device_remove(dev);
324 }
325
326 /* add new devices, which are new in the LCT */
327 for (i = 0; i < max; i++) {
328 int found = 0;
329
330 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
331 if (lct->lct_entry[i].tid == dev->lct_data.tid) {
332 found = 1;
333 break;
334 }
335 }
336
337 if (!found)
338 i2o_device_add(c, &lct->lct_entry[i]);
339 }
340 up(&c->lct_lock);
341
342 return 0;
343};
344
345/**
346 * i2o_device_class_show_class_id - Displays class id of I2O device
347 * @cd: class device of which the class id should be displayed
348 * @buf: buffer into which the class id should be printed
349 *
350 * Returns the number of bytes which are printed into the buffer.
351 */
352static ssize_t i2o_device_class_show_class_id(struct class_device *cd,
353 char *buf)
354{
355 struct i2o_device *dev = to_i2o_device(cd->dev);
356
357 sprintf(buf, "%03x\n", dev->lct_data.class_id);
358 return strlen(buf) + 1;
359};
360
361/**
362 * i2o_device_class_show_tid - Displays TID of I2O device
363 * @cd: class device of which the TID should be displayed
364 * @buf: buffer into which the class id should be printed
365 *
366 * Returns the number of bytes which are printed into the buffer.
367 */
368static ssize_t i2o_device_class_show_tid(struct class_device *cd, char *buf)
369{
370 struct i2o_device *dev = to_i2o_device(cd->dev);
371
372 sprintf(buf, "%03x\n", dev->lct_data.tid);
373 return strlen(buf) + 1;
374};
375
376/* I2O device class attributes */
377static CLASS_DEVICE_ATTR(class_id, S_IRUGO, i2o_device_class_show_class_id,
378 NULL);
379static CLASS_DEVICE_ATTR(tid, S_IRUGO, i2o_device_class_show_tid, NULL);
380
381/**
382 * i2o_device_class_add - Adds attributes to the I2O device
383 * @cd: I2O class device which is added to the I2O device class
384 *
385 * This function gets called when an I2O device is added to the class. It
386 * creates the attributes for each device and creates the user/parent
387 * symlinks if necessary.
388 *
389 * Returns 0 on success or negative error code on failure.
390 */
391static int i2o_device_class_add(struct class_device *cd)
392{
393 struct i2o_device *i2o_dev, *tmp;
394 struct i2o_controller *c;
395
396 i2o_dev = to_i2o_device(cd->dev);
397 c = i2o_dev->iop;
398
399 class_device_create_file(cd, &class_device_attr_class_id);
400 class_device_create_file(cd, &class_device_attr_tid);
401
402 /* create user entries for this device */
403 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
404 if (tmp)
405 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
406 "user");
407
408	/* create user entries referring to this device */
409 list_for_each_entry(tmp, &c->devices, list)
410 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
411 sysfs_create_link(&tmp->device.kobj,
412 &i2o_dev->device.kobj, "user");
413
414 /* create parent entries for this device */
415 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
416 if (tmp)
417 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
418 "parent");
419
420	/* create parent entries referring to this device */
421 list_for_each_entry(tmp, &c->devices, list)
422 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
423 sysfs_create_link(&tmp->device.kobj,
424 &i2o_dev->device.kobj, "parent");
425
426 return 0;
427};
428
429/* I2O device class interface */
430static struct class_interface i2o_device_class_interface = {
431 .class = &i2o_device_class,
432 .add = i2o_device_class_add
433};
434
435/*
436 * Run time support routines
437 */
438
439/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
440 *
441 * This function can be used for all UtilParamsGet/Set operations.
442 * The OperationList is given in oplist-buffer,
443 * and results are returned in reslist-buffer.
444 * Note that the minimum sized reslist is 8 bytes and contains
445 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
446 */
447
448int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
449 int oplen, void *reslist, int reslen)
450{
451 struct i2o_message __iomem *msg;
452 u32 m;
453 u32 *res32 = (u32 *) reslist;
454 u32 *restmp = (u32 *) reslist;
455 int len = 0;
456 int i = 0;
457 int rc;
458 struct i2o_dma res;
459 struct i2o_controller *c = i2o_dev->iop;
460 struct device *dev = &c->pdev->dev;
461
462 res.virt = NULL;
463
464 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
465 return -ENOMEM;
466
467 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
468 if (m == I2O_QUEUE_EMPTY) {
469 i2o_dma_free(dev, &res);
470 return -ETIMEDOUT;
471 }
472
473 i = 0;
474 writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid,
475 &msg->u.head[1]);
476 writel(0, &msg->body[i++]);
477 writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */
478 memcpy_toio(&msg->body[i], oplist, oplen);
479 i += (oplen / 4 + (oplen % 4 ? 1 : 0));
480 writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */
481 writel(res.phys, &msg->body[i++]);
482
483 writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
484 SGL_OFFSET_5, &msg->u.head[0]);
485
486 rc = i2o_msg_post_wait_mem(c, m, 10, &res);
487
488 /* This only looks like a memory leak - don't "fix" it. */
489 if (rc == -ETIMEDOUT)
490 return rc;
491
492 memcpy_fromio(reslist, res.virt, res.len);
493 i2o_dma_free(dev, &res);
494
495 /* Query failed */
496 if (rc)
497 return rc;
498 /*
499 * Calculate number of bytes of Result LIST
500 * We need to loop through each Result BLOCK and grab the length
501 */
502 restmp = res32 + 1;
503 len = 1;
504	for (i = 0; i < (res32[0] & 0x0000FFFF); i++) {
505 if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */
506 printk(KERN_WARNING
507 "%s - Error:\n ErrorInfoSize = 0x%02x, "
508 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
509 (cmd ==
510 I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" :
511 "PARAMS_GET", res32[1] >> 24,
512 (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF);
513
514 /*
515			 * If this is the only request, then we return an error
516 */
517 if ((res32[0] & 0x0000FFFF) == 1) {
518 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
519 }
520 }
521 len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
522 restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
523 }
524 return (len << 2); /* bytes used by result list */
525}
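/*
 * Reader's note (illustrative, not used by the driver): the loop above
 * slices the ResultList with fixed masks. Assuming the layout implied by
 * those masks, the header words decode as sketched below.
 */
static inline void example_parse_result_header(const u32 *reslist)
{
	u16 result_count = reslist[0] & 0xffff;	     /* number of Result BLOCKs */
	u8 error_info_size = reslist[1] >> 24;	     /* ErrorInfoSize of block 1 */
	u8 block_status = (reslist[1] >> 16) & 0xff; /* 0 means SUCCESS */
	u16 block_size = reslist[1] & 0xffff;	     /* block length in 32-bit words */

	(void)result_count;
	(void)error_info_size;
	(void)block_status;
	(void)block_size;
}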
526
527/*
528 * Query one field group value or a whole scalar group.
529 */
530int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
531 void *buf, int buflen)
532{
533 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
534 u8 resblk[8 + buflen]; /* 8 bytes for header */
535 int size;
536
537 if (field == -1) /* whole group */
538 opblk[4] = -1;
539
540 size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
541 sizeof(opblk), resblk, sizeof(resblk));
542
543 memcpy(buf, resblk + 8, buflen); /* cut off header */
544
545 if (size > buflen)
546 return buflen;
547
548 return size;
549}
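/*
 * Illustrative caller (hypothetical, not part of this file): read one
 * 32-bit scalar field with i2o_parm_field_get(). The group and field
 * numbers below are placeholders; real values come from the I2O spec
 * for the device class in question.
 */
static int example_read_scalar(struct i2o_device *i2o_dev)
{
	u32 value;
	int rc;

	rc = i2o_parm_field_get(i2o_dev, 0x0000, 0, &value, sizeof(value));
	if (rc < 0)
		return rc;	/* query failed */

	pr_debug("i2o: example scalar field = 0x%08x\n", value);

	return 0;
}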
550
551/*
552 * if oper == I2O_PARAMS_TABLE_GET, get from all rows
553 * if fieldcount == -1 return all fields
554 * ibuf and ibuflen are unused (use NULL, 0)
555 * else return specific fields
556 * ibuf contains fieldindexes
557 *
558 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
559 * if fieldcount == -1 return all fields
560 * ibuf contains rowcount, keyvalues
561 * else return specific fields
562 * fieldcount is # of fieldindexes
563 * ibuf contains fieldindexes, rowcount, keyvalues
564 *
565 * You could also use the function i2o_parm_issue() directly.
566 */
567int i2o_parm_table_get(struct i2o_device *dev, int oper, int group,
568 int fieldcount, void *ibuf, int ibuflen, void *resblk,
569 int reslen)
570{
571 u16 *opblk;
572 int size;
573
574 size = 10 + ibuflen;
575 if (size % 4)
576 size += 4 - size % 4;
577
578 opblk = kmalloc(size, GFP_KERNEL);
579 if (opblk == NULL) {
580 printk(KERN_ERR "i2o: no memory for query buffer.\n");
581 return -ENOMEM;
582 }
583
584 opblk[0] = 1; /* operation count */
585 opblk[1] = 0; /* pad */
586 opblk[2] = oper;
587 opblk[3] = group;
588 opblk[4] = fieldcount;
589 memcpy(opblk + 5, ibuf, ibuflen); /* other params */
590
591 size = i2o_parm_issue(dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
592 size, resblk, reslen);
593
594 kfree(opblk);
595 if (size > reslen)
596 return reslen;
597
598 return size;
599}
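/*
 * Illustrative caller (hypothetical): dump every field of every row of a
 * table group, following the rules in the comment above (fieldcount == -1,
 * ibuf/ibuflen unused). The group number is a placeholder.
 */
static int example_read_table(struct i2o_device *i2o_dev, void *resblk,
			      int reslen)
{
	return i2o_parm_table_get(i2o_dev, I2O_PARAMS_TABLE_GET, 0x0000, -1,
				  NULL, 0, resblk, reslen);
}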
600
601/**
602 * i2o_device_init - Initialize I2O devices
603 *
604 * Registers the I2O device class.
605 *
606 * Returns 0 on success or negative error code on failure.
607 */
608int i2o_device_init(void)
609{
610 int rc;
611
612 rc = class_register(&i2o_device_class);
613 if (rc)
614 return rc;
615
616 return class_interface_register(&i2o_device_class_interface);
617};
618
619/**
620 * i2o_device_exit - I2O devices exit function
621 *
622 * Unregisters the I2O device class.
623 */
624void i2o_device_exit(void)
625{
626	class_interface_unregister(&i2o_device_class_interface);
627 class_unregister(&i2o_device_class);
628};
629
630EXPORT_SYMBOL(i2o_device_claim);
631EXPORT_SYMBOL(i2o_device_claim_release);
632EXPORT_SYMBOL(i2o_parm_field_get);
633EXPORT_SYMBOL(i2o_parm_table_get);
634EXPORT_SYMBOL(i2o_parm_issue);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
new file mode 100644
index 000000000000..91f4edbb2a27
--- /dev/null
+++ b/drivers/message/i2o/driver.c
@@ -0,0 +1,374 @@
1/*
2 * Functions to handle I2O drivers (OSMs) and I2O bus type for sysfs
3 *
4 * Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Fixes/additions:
12 * Markus Lidel <Markus.Lidel@shadowconnect.com>
13 * initial version.
14 */
15
16#include <linux/device.h>
17#include <linux/module.h>
18#include <linux/rwsem.h>
19#include <linux/i2o.h>
20
21/* max_drivers - Maximum I2O drivers (OSMs) which could be registered */
22unsigned int i2o_max_drivers = I2O_MAX_DRIVERS;
23module_param_named(max_drivers, i2o_max_drivers, uint, 0);
24MODULE_PARM_DESC(max_drivers, "maximum number of OSM's to support");
25
26/* I2O drivers lock and array */
27static spinlock_t i2o_drivers_lock;
28static struct i2o_driver **i2o_drivers;
29
30/**
31 * i2o_bus_match - Tell if an I2O device class id matches the class ids
32 * of the I2O driver (OSM)
33 *
34 * @dev: device which should be verified
35 * @drv: the driver to match against
36 *
37 * Used by the bus to check if the driver wants to handle the device.
38 *
39 * Returns 1 if the class ids of the driver match the class id of the
40 * device, otherwise 0.
41 */
42static int i2o_bus_match(struct device *dev, struct device_driver *drv)
43{
44 struct i2o_device *i2o_dev = to_i2o_device(dev);
45 struct i2o_driver *i2o_drv = to_i2o_driver(drv);
46 struct i2o_class_id *ids = i2o_drv->classes;
47
48 if (ids)
49 while (ids->class_id != I2O_CLASS_END) {
50 if (ids->class_id == i2o_dev->lct_data.class_id)
51 return 1;
52 ids++;
53 }
54 return 0;
55};
56
57/* I2O bus type */
58struct bus_type i2o_bus_type = {
59 .name = "i2o",
60 .match = i2o_bus_match,
61};
62
63/**
64 * i2o_driver_register - Register an I2O driver (OSM) in the I2O core
65 * @drv: I2O driver which should be registered
66 *
67 * Registers the OSM drv in the I2O core and creates an event queue if
68 * necessary.
69 *
70 * Returns 0 on success or negative error code on failure.
71 */
72int i2o_driver_register(struct i2o_driver *drv)
73{
74 struct i2o_controller *c;
75 int i;
76 int rc = 0;
77 unsigned long flags;
78
79 pr_debug("i2o: Register driver %s\n", drv->name);
80
81 if (drv->event) {
82 drv->event_queue = create_workqueue(drv->name);
83 if (!drv->event_queue) {
84 printk(KERN_ERR "i2o: Could not initialize event queue "
85 "for driver %s\n", drv->name);
86 return -EFAULT;
87 }
88 pr_debug("i2o: Event queue initialized for driver %s\n",
89 drv->name);
90 } else
91 drv->event_queue = NULL;
92
93 drv->driver.name = drv->name;
94 drv->driver.bus = &i2o_bus_type;
95
96 spin_lock_irqsave(&i2o_drivers_lock, flags);
97
98	for (i = 0; i < i2o_max_drivers && i2o_drivers[i]; i++) ;	/* find a free slot */
99	if (i >= i2o_max_drivers) {
100		printk(KERN_ERR "i2o: too many drivers registered, "
101		       "increase max_drivers\n");
102		spin_unlock_irqrestore(&i2o_drivers_lock, flags);
103		return -EFAULT;
104	}
105
106 drv->context = i;
107 i2o_drivers[i] = drv;
108
109 spin_unlock_irqrestore(&i2o_drivers_lock, flags);
110
111 pr_debug("i2o: driver %s gets context id %d\n", drv->name,
112 drv->context);
113
114 list_for_each_entry(c, &i2o_controllers, list) {
115 struct i2o_device *i2o_dev;
116
117 i2o_driver_notify_controller_add(drv, c);
118 list_for_each_entry(i2o_dev, &c->devices, list)
119 i2o_driver_notify_device_add(drv, i2o_dev);
120 }
121
122
123 rc = driver_register(&drv->driver);
124	if (rc && drv->event_queue)
125 destroy_workqueue(drv->event_queue);
126
127 return rc;
128};
129
130/**
131 * i2o_driver_unregister - Unregister an I2O driver (OSM) from the I2O core
132 * @drv: I2O driver which should be unregistered
133 *
134 * Unregisters the OSM drv from the I2O core and cleans up event queues if
135 * necessary.
136 */
137void i2o_driver_unregister(struct i2o_driver *drv)
138{
139 struct i2o_controller *c;
140 unsigned long flags;
141
142 pr_debug("i2o: unregister driver %s\n", drv->name);
143
144 driver_unregister(&drv->driver);
145
146 list_for_each_entry(c, &i2o_controllers, list) {
147 struct i2o_device *i2o_dev;
148
149 list_for_each_entry(i2o_dev, &c->devices, list)
150 i2o_driver_notify_device_remove(drv, i2o_dev);
151
152 i2o_driver_notify_controller_remove(drv, c);
153 }
154
155 spin_lock_irqsave(&i2o_drivers_lock, flags);
156 i2o_drivers[drv->context] = NULL;
157 spin_unlock_irqrestore(&i2o_drivers_lock, flags);
158
159 if (drv->event_queue) {
160 destroy_workqueue(drv->event_queue);
161 drv->event_queue = NULL;
162 pr_debug("i2o: event queue removed for %s\n", drv->name);
163 }
164};
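/*
 * Putting the two calls together, a minimal OSM skeleton looks roughly
 * like the sketch below (compare the real Exec and Block OSMs later in
 * this patch). Names and the class id are placeholders; only fields of
 * struct i2o_driver that are exercised in this file are filled in.
 */
static struct i2o_class_id example_osm_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},	/* placeholder class */
	{I2O_CLASS_END}
};

static int example_osm_reply(struct i2o_controller *c, u32 m,
			     struct i2o_message *msg)
{
	return 1;	/* nothing to do; let the core flush the reply */
}

static struct i2o_driver example_osm_driver = {
	.name = "example-osm",
	.reply = example_osm_reply,
	.classes = example_osm_class_id,
};

static int __init example_osm_init(void)
{
	return i2o_driver_register(&example_osm_driver);
}

static void __exit example_osm_exit(void)
{
	i2o_driver_unregister(&example_osm_driver);
}

module_init(example_osm_init);
module_exit(example_osm_exit);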
165
166/**
167 * i2o_driver_dispatch - dispatch an I2O reply message
168 * @c: I2O controller of the message
169 * @m: I2O message number
170 * @msg: I2O message to be delivered
171 *
172 * The reply is delivered to the driver from which the original message
173 * came. This function is only called from interrupt context.
174 *
175 * Returns 0 on success and the message should not be flushed. Returns > 0
176 * on success and if the message should be flushed afterwards. Returns
177 * negative error code on failure (the message will be flushed too).
178 */
179int i2o_driver_dispatch(struct i2o_controller *c, u32 m,
180 struct i2o_message __iomem *msg)
181{
182 struct i2o_driver *drv;
183 u32 context = readl(&msg->u.s.icntxt);
184
185 if (likely(context < i2o_max_drivers)) {
186 spin_lock(&i2o_drivers_lock);
187 drv = i2o_drivers[context];
188 spin_unlock(&i2o_drivers_lock);
189
190 if (unlikely(!drv)) {
191 printk(KERN_WARNING "%s: Spurious reply to unknown "
192 "driver %d\n", c->name, context);
193 return -EIO;
194 }
195
196 if ((readl(&msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) {
197 struct i2o_device *dev, *tmp;
198 struct i2o_event *evt;
199 u16 size;
200 u16 tid;
201
202 tid = readl(&msg->u.head[1]) & 0x1fff;
203
204 pr_debug("%s: event received from device %d\n", c->name,
205 tid);
206
207			/* cut off the header from the message size (in 32-bit words) */
208 size = (readl(&msg->u.head[0]) >> 16) - 5;
209
210 evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
211 if (!evt)
212 return -ENOMEM;
213 memset(evt, 0, size * 4 + sizeof(*evt));
214
215 evt->size = size;
216 memcpy_fromio(&evt->tcntxt, &msg->u.s.tcntxt,
217 (size + 2) * 4);
218
219 list_for_each_entry_safe(dev, tmp, &c->devices, list)
220 if (dev->lct_data.tid == tid) {
221 evt->i2o_dev = dev;
222 break;
223 }
224
225 INIT_WORK(&evt->work, (void (*)(void *))drv->event,
226 evt);
227 queue_work(drv->event_queue, &evt->work);
228 return 1;
229 }
230
231 if (likely(drv->reply))
232 return drv->reply(c, m, msg);
233 else
234 pr_debug("%s: Reply to driver %s, but no reply function"
235 " defined!\n", c->name, drv->name);
236 return -EIO;
237 } else
238 printk(KERN_WARNING "%s: Spurious reply to unknown driver "
239 "%d\n", c->name, readl(&msg->u.s.icntxt));
240 return -EIO;
241}
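/*
 * The event branch above queues a kmalloc()ed struct i2o_event to the
 * OSM's event workqueue; the OSM's handler owns that allocation and must
 * kfree() it, exactly as i2o_exec_event() in exec-osm.c does. A minimal
 * sketch of such a handler (hypothetical OSM):
 */
static void example_osm_event(struct i2o_event *evt)
{
	pr_debug("example-osm: event received from TID %03x\n",
		 evt->i2o_dev->lct_data.tid);

	kfree(evt);	/* allocated by i2o_driver_dispatch() above */
}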
242
243/**
244 * i2o_driver_notify_controller_add_all - Send notify of added controller
245 * to all I2O drivers
246 *
247 * Send notifications to all registered drivers that a new controller was
248 * added.
249 */
250void i2o_driver_notify_controller_add_all(struct i2o_controller *c)
251{
252 int i;
253 struct i2o_driver *drv;
254
255 for (i = 0; i < I2O_MAX_DRIVERS; i++) {
256 drv = i2o_drivers[i];
257
258 if (drv)
259 i2o_driver_notify_controller_add(drv, c);
260 }
261}
262
263/**
264 * i2o_driver_notify_controller_remove_all - Send notify of removed
265 * controller to all I2O drivers
266 *
267 * Send notifications to all registered drivers that a controller was
268 * removed.
269 */
270void i2o_driver_notify_controller_remove_all(struct i2o_controller *c)
271{
272 int i;
273 struct i2o_driver *drv;
274
275 for (i = 0; i < I2O_MAX_DRIVERS; i++) {
276 drv = i2o_drivers[i];
277
278 if (drv)
279 i2o_driver_notify_controller_remove(drv, c);
280 }
281}
282
283/**
284 * i2o_driver_notify_device_add_all - Send notify of added device to all
285 * I2O drivers
286 *
287 * Send notifications to all registered drivers that a device was added.
288 */
289void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev)
290{
291 int i;
292 struct i2o_driver *drv;
293
294 for (i = 0; i < I2O_MAX_DRIVERS; i++) {
295 drv = i2o_drivers[i];
296
297 if (drv)
298 i2o_driver_notify_device_add(drv, i2o_dev);
299 }
300}
301
302/**
303 * i2o_driver_notify_device_remove_all - Send notify of removed device to
304 * all I2O drivers
305 *
306 * Send notifications to all registered drivers that a device was removed.
307 */
308void i2o_driver_notify_device_remove_all(struct i2o_device *i2o_dev)
309{
310 int i;
311 struct i2o_driver *drv;
312
313 for (i = 0; i < I2O_MAX_DRIVERS; i++) {
314 drv = i2o_drivers[i];
315
316 if (drv)
317 i2o_driver_notify_device_remove(drv, i2o_dev);
318 }
319}
320
321/**
322 * i2o_driver_init - initialize I2O drivers (OSMs)
323 *
324 * Registers the I2O bus and allocates memory for the array of OSMs.
325 *
326 * Returns 0 on success or negative error code on failure.
327 */
328int __init i2o_driver_init(void)
329{
330 int rc = 0;
331
332 spin_lock_init(&i2o_drivers_lock);
333
334 if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64) ||
335 ((i2o_max_drivers ^ (i2o_max_drivers - 1)) !=
336 (2 * i2o_max_drivers - 1))) {
337 printk(KERN_WARNING "i2o: max_drivers set to %d, but must be "
338 ">=2 and <= 64 and a power of 2\n", i2o_max_drivers);
339 i2o_max_drivers = I2O_MAX_DRIVERS;
340 }
341 printk(KERN_INFO "i2o: max drivers = %d\n", i2o_max_drivers);
342
343 i2o_drivers =
344 kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
345 if (!i2o_drivers)
346 return -ENOMEM;
347
348 memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers));
349
350 rc = bus_register(&i2o_bus_type);
351
352 if (rc < 0)
353 kfree(i2o_drivers);
354
355 return rc;
356};
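/*
 * Reader's note on the max_drivers check above: for a power of two x,
 * x ^ (x - 1) sets every bit from the single set bit downwards and so
 * equals 2 * x - 1; for any other x the XOR only reaches the lowest set
 * bit and comes out smaller. For example 8 ^ 7 = 15 = 2 * 8 - 1, while
 * 6 ^ 5 = 3 != 11. An equivalent throwaway helper (not used here):
 */
static inline int example_is_power_of_two(unsigned int x)
{
	return x && (x ^ (x - 1)) == (2 * x - 1);
}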
357
358/**
359 * i2o_driver_exit - clean up I2O drivers (OSMs)
360 *
361 * Unregisters the I2O bus and frees the driver array.
362 */
363void __exit i2o_driver_exit(void)
364{
365 bus_unregister(&i2o_bus_type);
366 kfree(i2o_drivers);
367};
368
369EXPORT_SYMBOL(i2o_driver_register);
370EXPORT_SYMBOL(i2o_driver_unregister);
371EXPORT_SYMBOL(i2o_driver_notify_controller_add_all);
372EXPORT_SYMBOL(i2o_driver_notify_controller_remove_all);
373EXPORT_SYMBOL(i2o_driver_notify_device_add_all);
374EXPORT_SYMBOL(i2o_driver_notify_device_remove_all);
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
new file mode 100644
index 000000000000..79c1cbfb8f44
--- /dev/null
+++ b/drivers/message/i2o/exec-osm.c
@@ -0,0 +1,507 @@
1/*
2 * Executive OSM
3 *
4 * Copyright (C) 1999-2002 Red Hat Software
5 *
6 * Written by Alan Cox, Building Number Three Ltd
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * A lot of the I2O message side code from this is taken from the Red
14 * Creek RCPCI45 adapter driver by Red Creek Communications
15 *
16 * Fixes/additions:
17 * Philipp Rumpf
18 * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
19 * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20 * Deepak Saxena <deepak@plexity.net>
21 * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22 * Alan Cox <alan@redhat.com>:
23 * Ported to Linux 2.5.
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Minor fixes for 2.6.
26 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
27 * Support for sysfs included.
28 */
29
30#include <linux/module.h>
31#include <linux/i2o.h>
32#include <linux/delay.h>
33
34#define OSM_NAME "exec-osm"
35
36struct i2o_driver i2o_exec_driver;
37
38static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind);
39
40/* Module internal functions from other sources */
41extern int i2o_device_parse_lct(struct i2o_controller *);
42
43/* global wait list for POST WAIT */
44static LIST_HEAD(i2o_exec_wait_list);
45
46/* Wait struct needed for POST WAIT */
47struct i2o_exec_wait {
48 wait_queue_head_t *wq; /* Pointer to Wait queue */
49 struct i2o_dma dma; /* DMA buffers to free on failure */
50 u32 tcntxt; /* transaction context from reply */
51 int complete; /* 1 if reply received otherwise 0 */
52 u32 m; /* message id */
53 struct i2o_message __iomem *msg; /* pointer to the reply message */
54 struct list_head list; /* node in global wait list */
55};
56
57/* Exec OSM class handling definition */
58static struct i2o_class_id i2o_exec_class_id[] = {
59 {I2O_CLASS_EXECUTIVE},
60 {I2O_CLASS_END}
61};
62
63/**
64 * i2o_exec_wait_alloc - Allocate a i2o_exec_wait struct an initialize it
65 *
66 * Allocate the i2o_exec_wait struct and initialize the wait.
67 *
68 * Returns i2o_exec_wait pointer on success or negative error code on
69 * failure.
70 */
71static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
72{
73 struct i2o_exec_wait *wait;
74
75 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
76 if (!wait)
77 return ERR_PTR(-ENOMEM);
78
79 memset(wait, 0, sizeof(*wait));
80
81 INIT_LIST_HEAD(&wait->list);
82
83 return wait;
84};
85
86/**
87 * i2o_exec_wait_free - Free an i2o_exec_wait struct
88 * @wait: I2O wait data which should be cleaned up
89 */
90static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
91{
92 kfree(wait);
93};
94
95/**
96 * i2o_msg_post_wait_mem - Post and wait a message with DMA buffers
97 * @c: controller
98 * @m: message to post
99 * @timeout: time in seconds to wait
100 * @dma: i2o_dma struct of the DMA buffer to free on failure
101 *
102 * This API allows an OSM to post a message and then be told whether or
103 * not the system received a successful reply. If the message times out
104 * then the value '-ETIMEDOUT' is returned. This is a special case. In
105 * this situation the message may (should) complete at an indefinite time
106 * in the future. When it completes it will use the memory buffer
107 * attached to the request. If -ETIMEDOUT is returned then the memory
108 * buffer must not be freed. Instead the event completion will free them
109 * for you. In all other cases the buffers are your problem.
110 *
111 * Returns 0 on success or negative error code on failure.
112 */
113int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
114 timeout, struct i2o_dma *dma)
115{
116 DECLARE_WAIT_QUEUE_HEAD(wq);
117 struct i2o_exec_wait *wait;
118 static u32 tcntxt = 0x80000000;
119 struct i2o_message __iomem *msg = c->in_queue.virt + m;
120 int rc = 0;
121
122 wait = i2o_exec_wait_alloc();
123 if (!wait)
124 return -ENOMEM;
125
126 if (tcntxt == 0xffffffff)
127 tcntxt = 0x80000000;
128
129 if (dma)
130 wait->dma = *dma;
131
132 /*
133 * Fill in the message initiator context and transaction context.
134 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
135 * so we can find a POST WAIT reply more easily in the reply handler.
136 */
137 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
138 wait->tcntxt = tcntxt++;
139 writel(wait->tcntxt, &msg->u.s.tcntxt);
140
141 /*
142 * Post the message to the controller. At some point later it will
143 * return. If we time out before it returns then complete will be zero.
144 */
145 i2o_msg_post(c, m);
146
147 if (!wait->complete) {
148 wait->wq = &wq;
149 /*
150		 * we add elements at the head, because an entry that is never
151		 * removed would otherwise have to be iterated over every time
152 */
153 list_add(&wait->list, &i2o_exec_wait_list);
154
155 wait_event_interruptible_timeout(wq, wait->complete,
156 timeout * HZ);
157
158 wait->wq = NULL;
159 }
160
161 barrier();
162
163 if (wait->complete) {
164 if (readl(&wait->msg->body[0]) >> 24)
165 rc = readl(&wait->msg->body[0]) & 0xff;
166 i2o_flush_reply(c, wait->m);
167 i2o_exec_wait_free(wait);
168 } else {
169 /*
170 * We cannot remove it now. This is important. When it does
171 * terminate (which it must do if the controller has not
172 * died...) then it will otherwise scribble on stuff.
173 *
174 * FIXME: try abort message
175 */
176 if (dma)
177 dma->virt = NULL;
178
179 rc = -ETIMEDOUT;
180 }
181
182 return rc;
183};
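/*
 * Condensed caller sketch (hypothetical) showing the buffer ownership
 * rule only; it mirrors what i2o_parm_issue() in device.c already does.
 * Message body contents are elided.
 */
static int example_post_with_buffer(struct i2o_controller *c, int len)
{
	struct i2o_message __iomem *msg;
	struct i2o_dma buf;
	u32 m;
	int rc;

	if (i2o_dma_alloc(&c->pdev->dev, &buf, len, GFP_KERNEL))
		return -ENOMEM;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY) {
		i2o_dma_free(&c->pdev->dev, &buf);
		return -ETIMEDOUT;
	}

	/* ... fill in msg->u.head[] and msg->body[] with writel() here ... */

	rc = i2o_msg_post_wait_mem(c, m, 60, &buf);
	if (rc == -ETIMEDOUT)
		return rc;	/* the completion path frees buf; do NOT free it here */

	i2o_dma_free(&c->pdev->dev, &buf);
	return rc;
}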
184
185/**
186 * i2o_msg_post_wait_complete - Reply to an i2o_msg_post request from the IOP
187 * @c: I2O controller which answers
188 * @m: message id
189 * @msg: pointer to the I2O reply message
190 *
191 * This function is called in interrupt context only. If the reply reached
192 * before the timeout, the i2o_exec_wait struct is filled with the message
193 * and the task will be woken up. The task is now responsible for returning
194 * the message m back to the controller! If the message reaches us after
195 * the timeout clean up the i2o_exec_wait struct (including allocated
196 * DMA buffer).
197 *
198 * Return 0 on success and if the message m should not be given back to the
199 * I2O controller, or >0 on success and if the message should be given back
200 * afterwards. Returns negative error code on failure. In this case the
201 * message must also be given back to the controller.
202 */
203static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
204 struct i2o_message __iomem *msg)
205{
206 struct i2o_exec_wait *wait, *tmp;
207	static spinlock_t lock = SPIN_LOCK_UNLOCKED;	/* protects i2o_exec_wait_list */
208 int rc = 1;
209 u32 context;
210
211	/* lock is statically initialised; re-running spin_lock_init() here would race */
212
213 context = readl(&msg->u.s.tcntxt);
214
215 /*
216 * We need to search through the i2o_exec_wait_list to see if the given
217 * message is still outstanding. If not, it means that the IOP took
218	 * longer to respond to the message than we had allowed and the timer has
219 * already expired. Not much we can do about that except log it for
220 * debug purposes, increase timeout, and recompile.
221 */
222 spin_lock(&lock);
223 list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
224 if (wait->tcntxt == context) {
225 list_del(&wait->list);
226
227 wait->m = m;
228 wait->msg = msg;
229 wait->complete = 1;
230
231 barrier();
232
233 if (wait->wq) {
234 wake_up_interruptible(wait->wq);
235 rc = 0;
236 } else {
237 struct device *dev;
238
239 dev = &c->pdev->dev;
240
241				pr_debug("%s: timed out reply received!\n",
242 c->name);
243 i2o_dma_free(dev, &wait->dma);
244 i2o_exec_wait_free(wait);
245 rc = -1;
246 }
247
248 spin_unlock(&lock);
249
250 return rc;
251 }
252 }
253
254 spin_unlock(&lock);
255
256 pr_debug("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
257 context);
258
259 return -1;
260};
261
262/**
263 * i2o_exec_probe - Called if a new I2O device (executive class) appears
264 * @dev: I2O device which should be probed
265 *
266 * Registers event notification for every event from Executive device. The
267 * return is always 0, because we want all devices of class Executive.
268 *
269 * Returns 0 on success.
270 */
271static int i2o_exec_probe(struct device *dev)
272{
273 struct i2o_device *i2o_dev = to_i2o_device(dev);
274
275 i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
276
277 i2o_dev->iop->exec = i2o_dev;
278
279 return 0;
280};
281
282/**
283 * i2o_exec_remove - Called on I2O device removal
284 * @dev: I2O device which was removed
285 *
286 * Unregisters event notification from Executive I2O device.
287 *
288 * Returns 0 on success.
289 */
290static int i2o_exec_remove(struct device *dev)
291{
292 i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
293
294 return 0;
295};
296
297/**
298 * i2o_exec_lct_modified - Called on LCT NOTIFY reply
299 * @c: I2O controller on which the LCT was modified
300 *
301 * This function handles asynchronous LCT NOTIFY replies. It parses the
302 * new LCT and, if the buffer for the LCT was too small, sends an LCT NOTIFY
303 * again.
304 */
305static void i2o_exec_lct_modified(struct i2o_controller *c)
306{
307 if (i2o_device_parse_lct(c) == -EAGAIN)
308 i2o_exec_lct_notify(c, 0);
309};
310
311/**
312 * i2o_exec_reply - I2O Executive reply handler
313 * @c: I2O controller from which the reply comes
314 * @m: message id
315 * @msg: pointer to the I2O reply message
316 *
317 * This function is always called from interrupt context. If a POST WAIT
318 * reply was received, pass it to the complete function. If an LCT NOTIFY
319 * reply was received, a new event is created to handle the update.
320 *
321 * Returns 0 on success and if the reply should not be flushed or > 0
322 * on success and if the reply should be flushed. Returns negative error
323 * code on failure and if the reply should be flushed.
324 */
325static int i2o_exec_reply(struct i2o_controller *c, u32 m,
326 struct i2o_message *msg)
327{
328 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { // Fail bit is set
329 struct i2o_message __iomem *pmsg; /* preserved message */
330 u32 pm;
331
332 pm = le32_to_cpu(msg->body[3]);
333
334 pmsg = i2o_msg_in_to_virt(c, pm);
335
336 i2o_report_status(KERN_INFO, "i2o_core", msg);
337
338 /* Release the preserved msg by resubmitting it as a NOP */
339 i2o_msg_nop(c, pm);
340
341 /* If reply to i2o_post_wait failed, return causes a timeout */
342 return -1;
343 }
344
345 if (le32_to_cpu(msg->u.s.tcntxt) & 0x80000000)
346 return i2o_msg_post_wait_complete(c, m, msg);
347
348 if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) {
349 struct work_struct *work;
350
351 pr_debug("%s: LCT notify received\n", c->name);
352
353 work = kmalloc(sizeof(*work), GFP_ATOMIC);
354 if (!work)
355 return -ENOMEM;
356
357 INIT_WORK(work, (void (*)(void *))i2o_exec_lct_modified, c);
358 queue_work(i2o_exec_driver.event_queue, work);
359 return 1;
360 }
361
362 /*
363 * If this happens, we want to dump the message to the syslog so
364 * it can be sent back to the card manufacturer by the end user
365 * to aid in debugging.
366 *
367 */
368	printk(KERN_WARNING "%s: Unsolicited message reply sent to core! "
369 "Message dumped to syslog\n", c->name);
370 i2o_dump_message(msg);
371
372 return -EFAULT;
373}
374
375/**
376 * i2o_exec_event - Event handling function
377 * @evt: Event which occurs
378 *
379 * Handles events sent by the Executive device. At the moment it does not do
380 * anything useful.
381 */
382static void i2o_exec_event(struct i2o_event *evt)
383{
384 osm_info("Event received from device: %d\n",
385 evt->i2o_dev->lct_data.tid);
386 kfree(evt);
387};
388
389/**
390 * i2o_exec_lct_get - Get the IOP's Logical Configuration Table
391 * @c: I2O controller from which the LCT should be fetched
392 *
393 * Send an LCT NOTIFY request to the controller, and wait up to
394 * I2O_TIMEOUT_LCT_GET seconds for the response to arrive. If the LCT is
395 * too large, retry.
396 *
397 * Returns 0 on success or negative error code on failure.
398 */
399int i2o_exec_lct_get(struct i2o_controller *c)
400{
401 struct i2o_message __iomem *msg;
402 u32 m;
403 int i = 0;
404 int rc = -EAGAIN;
405
406 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
407 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
408 if (m == I2O_QUEUE_EMPTY)
409 return -ETIMEDOUT;
410
411 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
412 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
413 &msg->u.head[1]);
414 writel(0xffffffff, &msg->body[0]);
415 writel(0x00000000, &msg->body[1]);
416 writel(0xd0000000 | c->dlct.len, &msg->body[2]);
417 writel(c->dlct.phys, &msg->body[3]);
418
419 rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET);
420 if (rc < 0)
421 break;
422
423 rc = i2o_device_parse_lct(c);
424 if (rc != -EAGAIN)
425 break;
426 }
427
428 return rc;
429}
430
431/**
432 * i2o_exec_lct_notify - Send an asynchronous LCT NOTIFY request
433 * @c: I2O controller to which the request should be sent
434 * @change_ind: change indicator
435 *
436 * This function sends an LCT NOTIFY request to the I2O controller with
437 * the change indicator change_ind. If change_ind == 0 the controller
438 * replies immediately after the request. If change_ind > 0 the reply is
439 * sent after the change indicator of the LCT exceeds change_ind.
440 */
441static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
442{
443 i2o_status_block *sb = c->status_block.virt;
444 struct device *dev;
445 struct i2o_message __iomem *msg;
446 u32 m;
447
448 dev = &c->pdev->dev;
449
450 if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL))
451 return -ENOMEM;
452
453 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
454 if (m == I2O_QUEUE_EMPTY)
455 return -ETIMEDOUT;
456
457 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
458 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
459 &msg->u.head[1]);
460 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
461 writel(0, &msg->u.s.tcntxt); /* FIXME */
462 writel(0xffffffff, &msg->body[0]);
463 writel(change_ind, &msg->body[1]);
464 writel(0xd0000000 | c->dlct.len, &msg->body[2]);
465 writel(c->dlct.phys, &msg->body[3]);
466
467 i2o_msg_post(c, m);
468
469 return 0;
470};
471
472/* Exec OSM driver struct */
473struct i2o_driver i2o_exec_driver = {
474 .name = OSM_NAME,
475 .reply = i2o_exec_reply,
476 .event = i2o_exec_event,
477 .classes = i2o_exec_class_id,
478 .driver = {
479 .probe = i2o_exec_probe,
480 .remove = i2o_exec_remove,
481 },
482};
483
484/**
485 * i2o_exec_init - Registers the Exec OSM
486 *
487 * Registers the Exec OSM in the I2O core.
488 *
489 * Returns 0 on success or negative error code on failure.
490 */
491int __init i2o_exec_init(void)
492{
493 return i2o_driver_register(&i2o_exec_driver);
494};
495
496/**
497 * i2o_exec_exit - Removes the Exec OSM
498 *
499 * Unregisters the Exec OSM from the I2O core.
500 */
501void __exit i2o_exec_exit(void)
502{
503 i2o_driver_unregister(&i2o_exec_driver);
504};
505
506EXPORT_SYMBOL(i2o_msg_post_wait_mem);
507EXPORT_SYMBOL(i2o_exec_lct_get);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
new file mode 100644
index 000000000000..7b74c87b569e
--- /dev/null
+++ b/drivers/message/i2o/i2o_block.c
@@ -0,0 +1,1247 @@
1/*
2 * Block OSM
3 *
4 * Copyright (C) 1999-2002 Red Hat Software
5 *
6 * Written by Alan Cox, Building Number Three Ltd
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * For the purpose of avoiding doubt the preferred form of the work
19 * for making modifications shall be a standards compliant form such
20 * gzipped tar and not one requiring a proprietary or patent encumbered
21 * tool to unpack.
22 *
23 * Fixes/additions:
24 * Steve Ralston:
25 * Multiple device handling error fixes,
26 * Added a queue depth.
27 * Alan Cox:
28 * FC920 has an rmw bug. Dont or in the end marker.
29 * Removed queue walk, fixed for 64bitness.
30 * Rewrote much of the code over time
31 * Added indirect block lists
32 * Handle 64K limits on many controllers
33 * Don't use indirects on the Promise (breaks)
34 * Heavily chop down the queue depths
35 * Deepak Saxena:
36 * Independent queues per IOP
37 * Support for dynamic device creation/deletion
38 * Code cleanup
39 * Support for larger I/Os through merge* functions
40 * (taken from DAC960 driver)
41 * Boji T Kannanthanam:
42 * Set the I2O Block devices to be detected in increasing
43 * order of TIDs during boot.
44 * Search and set the I2O block device that we boot off
45 * from as the first device to be claimed (as /dev/i2o/hda)
46 * Properly attach/detach I2O gendisk structure from the
47 * system gendisk list. The I2O block devices now appear in
48 * /proc/partitions.
49 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
50 * Minor bugfixes for 2.6.
51 */
52
53#include <linux/module.h>
54#include <linux/i2o.h>
55
56#include <linux/mempool.h>
57
58#include <linux/genhd.h>
59#include <linux/blkdev.h>
60#include <linux/hdreg.h>
61
62#include "i2o_block.h"
63
64#define OSM_NAME "block-osm"
65#define OSM_VERSION "$Rev$"
66#define OSM_DESCRIPTION "I2O Block Device OSM"
67
68static struct i2o_driver i2o_block_driver;
69
70/* global Block OSM request mempool */
71static struct i2o_block_mempool i2o_blk_req_pool;
72
73/* Block OSM class handling definition */
74static struct i2o_class_id i2o_block_class_id[] = {
75 {I2O_CLASS_RANDOM_BLOCK_STORAGE},
76 {I2O_CLASS_END}
77};
78
79/**
80 * i2o_block_device_free - free the memory of the I2O Block device
81 * @dev: I2O Block device, which should be cleaned up
82 *
83 * Frees the request queue, gendisk and the i2o_block_device structure.
84 */
85static void i2o_block_device_free(struct i2o_block_device *dev)
86{
87 blk_cleanup_queue(dev->gd->queue);
88
89 put_disk(dev->gd);
90
91 kfree(dev);
92};
93
94/**
95 * i2o_block_remove - remove the I2O Block device from the system again
96 * @dev: I2O Block device which should be removed
97 *
98 * Remove gendisk from system and free all allocated memory.
99 *
100 * Always returns 0.
101 */
102static int i2o_block_remove(struct device *dev)
103{
104 struct i2o_device *i2o_dev = to_i2o_device(dev);
105 struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);
106
107 osm_info("Device removed %s\n", i2o_blk_dev->gd->disk_name);
108
109 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);
110
111 del_gendisk(i2o_blk_dev->gd);
112
113 dev_set_drvdata(dev, NULL);
114
115 i2o_device_claim_release(i2o_dev);
116
117 i2o_block_device_free(i2o_blk_dev);
118
119 return 0;
120};
121
122/**
123 * i2o_block_device_flush - Flush all dirty data of I2O device dev
124 * @dev: I2O device which should be flushed
125 *
126 * Flushes all dirty data on device dev.
127 *
128 * Returns 0 on success or negative error code on failure.
129 */
130static int i2o_block_device_flush(struct i2o_device *dev)
131{
132 struct i2o_message __iomem *msg;
133 u32 m;
134
135 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
136 if (m == I2O_QUEUE_EMPTY)
137 return -ETIMEDOUT;
138
139 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
140 writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
141 &msg->u.head[1]);
142 writel(60 << 16, &msg->body[0]);
143 osm_debug("Flushing...\n");
144
145 return i2o_msg_post_wait(dev->iop, m, 60);
146};
147
148/**
149 * i2o_block_device_mount - Mount (load) the media of device dev
150 * @dev: I2O device which should receive the mount request
151 * @media_id: Media Identifier
152 *
153 * Load media into the drive. The identifier should be set to -1, because the
154 * spec does not support any other value.
155 *
156 * Returns 0 on success or negative error code on failure.
157 */
158static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
159{
160 struct i2o_message __iomem *msg;
161 u32 m;
162
163 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
164 if (m == I2O_QUEUE_EMPTY)
165 return -ETIMEDOUT;
166
167 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
168 writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
169 &msg->u.head[1]);
170 writel(-1, &msg->body[0]);
171 writel(0, &msg->body[1]);
172 osm_debug("Mounting...\n");
173
174 return i2o_msg_post_wait(dev->iop, m, 2);
175};
176
177/**
178 * i2o_block_device_lock - Locks the media of device dev
179 * @dev: I2O device which should receive the lock request
180 * @media_id: Media Identifier
181 *
182 * Lock media of device dev to prevent removal. The media identifier
183 * should be set to -1, because the spec does not support any other value.
184 *
185 * Returns 0 on success or negative error code on failure.
186 */
187static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
188{
189 struct i2o_message __iomem *msg;
190 u32 m;
191
192 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
193 if (m == I2O_QUEUE_EMPTY)
194 return -ETIMEDOUT;
195
196 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
197 writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
198 &msg->u.head[1]);
199 writel(-1, &msg->body[0]);
200 osm_debug("Locking...\n");
201
202 return i2o_msg_post_wait(dev->iop, m, 2);
203};
204
205/**
206 * i2o_block_device_unlock - Unlocks the media of device dev
207 * @dev: I2O device which should receive the unlock request
208 * @media_id: Media Identifier
209 *
210 * Unlocks the media in device dev. The media identifier should be set to
211 * -1, because the spec does not support any other value.
212 *
213 * Returns 0 on success or negative error code on failure.
214 */
215static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
216{
217 struct i2o_message __iomem *msg;
218 u32 m;
219
220 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
221 if (m == I2O_QUEUE_EMPTY)
222 return -ETIMEDOUT;
223
224 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
225 writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
226 &msg->u.head[1]);
227 writel(media_id, &msg->body[0]);
228 osm_debug("Unlocking...\n");
229
230 return i2o_msg_post_wait(dev->iop, m, 2);
231};
232
233/**
234 * i2o_block_device_power - Power management for device dev
235 * @dev: I2O device which should receive the power management request
236 * @op: Operation which should be sent
237 *
238 * Send a power management request to the device dev.
239 *
240 * Returns 0 on success or negative error code on failure.
241 */
242static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
243{
244 struct i2o_device *i2o_dev = dev->i2o_dev;
245 struct i2o_controller *c = i2o_dev->iop;
246 struct i2o_message __iomem *msg;
247 u32 m;
248 int rc;
249
250 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
251 if (m == I2O_QUEUE_EMPTY)
252 return -ETIMEDOUT;
253
254 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
255 writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data.
256 tid, &msg->u.head[1]);
257 writel(op << 24, &msg->body[0]);
258 osm_debug("Power...\n");
259
260 rc = i2o_msg_post_wait(c, m, 60);
261 if (!rc)
262 dev->power = op;
263
264 return rc;
265};
266
267/**
268 * i2o_block_request_alloc - Allocate an I2O block request struct
269 *
270 * Allocates an I2O block request struct and initializes the list.
271 *
272 * Returns an i2o_block_request pointer on success or negative error code
273 * on failure.
274 */
275static inline struct i2o_block_request *i2o_block_request_alloc(void)
276{
277 struct i2o_block_request *ireq;
278
279 ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
280 if (!ireq)
281 return ERR_PTR(-ENOMEM);
282
283 INIT_LIST_HEAD(&ireq->queue);
284
285 return ireq;
286};
287
288/**
289 * i2o_block_request_free - Frees an I2O block request
290 * @ireq: I2O block request which should be freed
291 *
292 * Frees the allocated memory (gives it back to the request mempool).
293 */
294static inline void i2o_block_request_free(struct i2o_block_request *ireq)
295{
296 mempool_free(ireq, i2o_blk_req_pool.pool);
297};
298
299/**
300 * i2o_block_sglist_alloc - Allocate the SG list and map it
301 * @ireq: I2O block request
302 *
303 * Builds the SG list and maps it so that it is accessible by the controller.
304 *
305 * Returns the number of elements in the SG list or 0 on failure.
306 */
307static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
308{
309 struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
310 int nents;
311
312 nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);
313
314 if (rq_data_dir(ireq->req) == READ)
315 ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
316 else
317 ireq->sg_dma_direction = PCI_DMA_TODEVICE;
318
319 ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
320 ireq->sg_dma_direction);
321
322 return ireq->sg_nents;
323};
324
325/**
326 * i2o_block_sglist_free - Frees the SG list
327 * @ireq: I2O block request from which the SG should be freed
328 *
329 * Frees the SG list from the I2O block request.
330 */
331static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
332{
333 struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
334
335 dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
336 ireq->sg_dma_direction);
337};
338
339/**
340 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
341 * @q: request queue for the request
342 * @req: the request to prepare
343 *
344 * Allocate the necessary i2o_block_request struct and connect it to
345 * the request. This is needed so that we do not lose the SG list later on.
346 *
347 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
348 */
349static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
350{
351 struct i2o_block_device *i2o_blk_dev = q->queuedata;
352 struct i2o_block_request *ireq;
353
354 /* request is already processed by us, so return */
355 if (req->flags & REQ_SPECIAL) {
356 osm_debug("REQ_SPECIAL already set!\n");
357 req->flags |= REQ_DONTPREP;
358 return BLKPREP_OK;
359 }
360
361 /* connect the i2o_block_request to the request */
362 if (!req->special) {
363 ireq = i2o_block_request_alloc();
364 if (unlikely(IS_ERR(ireq))) {
365 osm_debug("unable to allocate i2o_block_request!\n");
366 return BLKPREP_DEFER;
367 }
368
369 ireq->i2o_blk_dev = i2o_blk_dev;
370 req->special = ireq;
371 ireq->req = req;
372 } else
373 ireq = req->special;
374
375 /* do not come back here */
376 req->flags |= REQ_DONTPREP | REQ_SPECIAL;
377
378 return BLKPREP_OK;
379};
380
381/**
382 * i2o_block_delayed_request_fn - delayed request queue function
383 * @delayed_request: the delayed request with the queue to start
384 *
385 * If the request queue is stopped for a disk, and there is no open
386 * request, a new event is created, which calls this function to start
387 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
388 * be started again.
389 */
390static void i2o_block_delayed_request_fn(void *delayed_request)
391{
392 struct i2o_block_delayed_request *dreq = delayed_request;
393 struct request_queue *q = dreq->queue;
394 unsigned long flags;
395
396 spin_lock_irqsave(q->queue_lock, flags);
397 blk_start_queue(q);
398 spin_unlock_irqrestore(q->queue_lock, flags);
399 kfree(dreq);
400};
401
402/**
403 * i2o_block_reply - Block OSM reply handler.
404 * @c: I2O controller from which the message arrives
405 * @m: message id of reply
406 * @msg: the actual I2O message reply
407 *
408 * This function gets all the message replies.
409 *
410 */
411static int i2o_block_reply(struct i2o_controller *c, u32 m,
412 struct i2o_message *msg)
413{
414 struct i2o_block_request *ireq;
415 struct request *req;
416 struct i2o_block_device *dev;
417 struct request_queue *q;
418 u8 st;
419 unsigned long flags;
420
421 /* FAILed message */
422 if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
423 struct i2o_message *pmsg;
424 u32 pm;
425
426 /*
427 * FAILed message from controller
428 * We increment the error count and abort it
429 *
430 * In theory this will never happen. The I2O block class
431 * specification states that block devices never return
432 * FAILs but instead use the REQ status field...but
433 * better be on the safe side since no one really follows
434 * the spec to the book :)
435 */
436 pm = le32_to_cpu(msg->body[3]);
437 pmsg = i2o_msg_in_to_virt(c, pm);
438
439 req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
440 if (unlikely(!req)) {
441 osm_err("NULL reply received!\n");
442 return -1;
443 }
444
445 ireq = req->special;
446 dev = ireq->i2o_blk_dev;
447 q = dev->gd->queue;
448
449 req->errors++;
450
451 spin_lock_irqsave(q->queue_lock, flags);
452
453 while (end_that_request_chunk(req, !req->errors,
454 le32_to_cpu(pmsg->body[1]))) ;
455 end_that_request_last(req);
456
457 dev->open_queue_depth--;
458 list_del(&ireq->queue);
459 blk_start_queue(q);
460
461 spin_unlock_irqrestore(q->queue_lock, flags);
462
463 /* Now flush the message by making it a NOP */
464 i2o_msg_nop(c, pm);
465
466 return -1;
467 }
468
469 req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
470 if (unlikely(!req)) {
471 osm_err("NULL reply received!\n");
472 return -1;
473 }
474
475 ireq = req->special;
476 dev = ireq->i2o_blk_dev;
477 q = dev->gd->queue;
478
479 if (unlikely(!dev->i2o_dev)) {
480 /*
481		 * This is a HACK, but Intel Integrated RAID allows the user
482 * to delete a volume that is claimed, locked, and in use
483 * by the OS. We have to check for a reply from a
484 * non-existent device and flag it as an error or the system
485 * goes kaput...
486 */
487 req->errors++;
488 osm_warn("Data transfer to deleted device!\n");
489 spin_lock_irqsave(q->queue_lock, flags);
490 while (end_that_request_chunk
491 (req, !req->errors, le32_to_cpu(msg->body[1]))) ;
492 end_that_request_last(req);
493
494 dev->open_queue_depth--;
495 list_del(&ireq->queue);
496 blk_start_queue(q);
497
498 spin_unlock_irqrestore(q->queue_lock, flags);
499 return -1;
500 }
501
502 /*
503 * Lets see what is cooking. We stuffed the
504 * request in the context.
505 */
506
507 st = le32_to_cpu(msg->body[0]) >> 24;
508
509 if (st != 0) {
510 int err;
511 char *bsa_errors[] = {
512 "Success",
513 "Media Error",
514 "Failure communicating to device",
515 "Device Failure",
516 "Device is not ready",
517 "Media not present",
518 "Media is locked by another user",
519 "Media has failed",
520 "Failure communicating to device",
521 "Device bus failure",
522 "Device is locked by another user",
523 "Device is write protected",
524 "Device has reset",
525 "Volume has changed, waiting for acknowledgement"
526 };
527
528 err = le32_to_cpu(msg->body[0]) & 0xffff;
529
530 /*
531		 * Device not ready means two things. One is that the
532		 * device went offline (but not a media removal).
533 *
534 * The second is that you have a SuperTrak 100 and the
535 * firmware got constipated. Unlike standard i2o card
536 * setups the supertrak returns an error rather than
537 * blocking for the timeout in these cases.
538 *
539 * Don't stick a supertrak100 into cache aggressive modes
540 */
541
542 osm_err("block-osm: /dev/%s error: %s", dev->gd->disk_name,
543 bsa_errors[le32_to_cpu(msg->body[0]) & 0xffff]);
544 if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
545 printk(KERN_ERR " - DDM attempted %d retries",
546 (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
547 printk(KERN_ERR ".\n");
548 req->errors++;
549 } else
550 req->errors = 0;
551
552 if (!end_that_request_chunk
553 (req, !req->errors, le32_to_cpu(msg->body[1]))) {
554 add_disk_randomness(req->rq_disk);
555 spin_lock_irqsave(q->queue_lock, flags);
556
557 end_that_request_last(req);
558
559 dev->open_queue_depth--;
560 list_del(&ireq->queue);
561 blk_start_queue(q);
562
563 spin_unlock_irqrestore(q->queue_lock, flags);
564
565 i2o_block_sglist_free(ireq);
566 i2o_block_request_free(ireq);
567 } else
568 osm_err("still remaining chunks\n");
569
570 return 1;
571};
572
573static void i2o_block_event(struct i2o_event *evt)
574{
575 osm_info("block-osm: event received\n");
576};
577
578/*
579 * SCSI-CAM for ioctl geometry mapping
580 * Duplicated with SCSI - this should be moved somewhere common,
581 * perhaps genhd?
582 *
583 * LBA -> CHS mapping table taken from:
584 *
585 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
586 * Platforms"
587 *
588 * This is an I2O document that is only available to I2O members,
589 * not developers.
590 *
591 * From my understanding, this is how all the I2O cards do this
592 *
593 * Disk Size       | Sectors | Heads | Cylinders
594 * ----------------+---------+-------+-------------------
595 * X <= 528M       | 63      | 16    | X/(63 * 16 * 512)
596 * 528M < X <= 1G  | 63      | 32    | X/(63 * 32 * 512)
597 *
598 * Larger capacities use 64, 128 or 255 heads; see the size
599 * thresholds in i2o_block_biosparam() below.
600 */
601#define BLOCK_SIZE_528M 1081344
602#define BLOCK_SIZE_1G 2097152
603#define BLOCK_SIZE_21G 4403200
604#define BLOCK_SIZE_42G 8806400
605#define BLOCK_SIZE_84G 17612800
606
607static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
608 unsigned char *hds, unsigned char *secs)
609{
610 unsigned long heads, sectors, cylinders;
611
612 sectors = 63L; /* Maximize sectors per track */
613 if (capacity <= BLOCK_SIZE_528M)
614 heads = 16;
615 else if (capacity <= BLOCK_SIZE_1G)
616 heads = 32;
617 else if (capacity <= BLOCK_SIZE_21G)
618 heads = 64;
619 else if (capacity <= BLOCK_SIZE_42G)
620 heads = 128;
621 else
622 heads = 255;
623
624 cylinders = (unsigned long)capacity / (heads * sectors);
625
626 *cyls = (unsigned short)cylinders; /* Stuff return values */
627 *secs = (unsigned char)sectors;
628 *hds = (unsigned char)heads;
629}
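/*
 * Worked example for the mapping above, assuming a hypothetical volume of
 * 8,000,000 512-byte sectors (capacity here is a sector count; the ioctl
 * path passes get_capacity(disk)): 8,000,000 is above BLOCK_SIZE_21G
 * (4,403,200) but not above BLOCK_SIZE_42G (8,806,400), so heads = 128,
 * sectors = 63 and cylinders = 8000000 / (128 * 63) = 992.
 */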
630
631/**
632 * i2o_block_open - Open the block device
633 *
634 * Power up the device, then mount and lock the media. This function is
635 * called when the block device is opened for access.
636 *
637 * Returns 0 on success or negative error code on failure.
638 */
639static int i2o_block_open(struct inode *inode, struct file *file)
640{
641 struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;
642
643 if (!dev->i2o_dev)
644 return -ENODEV;
645
646 if (dev->power > 0x1f)
647 i2o_block_device_power(dev, 0x02);
648
649 i2o_block_device_mount(dev->i2o_dev, -1);
650
651 i2o_block_device_lock(dev->i2o_dev, -1);
652
653 osm_debug("Ready.\n");
654
655 return 0;
656};
657
658/**
659 * i2o_block_release - Release the I2O block device
660 *
661 * Unlock and unmount the media, and power down the device. Called when
662 * the block device is closed.
663 *
664 * Returns 0 on success or negative error code on failure.
665 */
666static int i2o_block_release(struct inode *inode, struct file *file)
667{
668 struct gendisk *disk = inode->i_bdev->bd_disk;
669 struct i2o_block_device *dev = disk->private_data;
670 u8 operation;
671
672 /*
673	 * This is to deal with the case of an application
674	 * opening a device, the device disappearing while it
675	 * is in use, and the application then trying to release
676	 * it, e.g. unmounting a deleted RAID volume at reboot.
677 * If we send messages, it will just cause FAILs since
678 * the TID no longer exists.
679 */
680 if (!dev->i2o_dev)
681 return 0;
682
683 i2o_block_device_flush(dev->i2o_dev);
684
685 i2o_block_device_unlock(dev->i2o_dev, -1);
686
687 if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */
688 operation = 0x21;
689 else
690 operation = 0x24;
691
692 i2o_block_device_power(dev, operation);
693
694 return 0;
695}
696
697/**
698 * i2o_block_ioctl - Issue device specific ioctl calls.
699 * @cmd: ioctl command
700 * @arg: ioctl argument
701 *
702 * Handles ioctl requests for the block device.
703 *
704 * Returns 0 on success or a negative error code on failure.
705 */
706static int i2o_block_ioctl(struct inode *inode, struct file *file,
707 unsigned int cmd, unsigned long arg)
708{
709 struct gendisk *disk = inode->i_bdev->bd_disk;
710 struct i2o_block_device *dev = disk->private_data;
711 void __user *argp = (void __user *)arg;
712
713 /* Anyone capable of this syscall can do *real bad* things */
714
715 if (!capable(CAP_SYS_ADMIN))
716 return -EPERM;
717
718 switch (cmd) {
719 case HDIO_GETGEO:
720 {
721 struct hd_geometry g;
722 i2o_block_biosparam(get_capacity(disk),
723 &g.cylinders, &g.heads, &g.sectors);
724 g.start = get_start_sect(inode->i_bdev);
725 return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
726 }
727
728 case BLKI2OGRSTRAT:
729 return put_user(dev->rcache, (int __user *)arg);
730 case BLKI2OGWSTRAT:
731 return put_user(dev->wcache, (int __user *)arg);
732 case BLKI2OSRSTRAT:
733 if (arg < 0 || arg > CACHE_SMARTFETCH)
734 return -EINVAL;
735 dev->rcache = arg;
736		return 0;
737 case BLKI2OSWSTRAT:
738 if (arg != 0
739 && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
740 return -EINVAL;
741 dev->wcache = arg;
742		return 0;
743 }
744 return -ENOTTY;
745};
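/*
 * Illustrative userspace sketch of the cache-strategy ioctls handled above.
 * It assumes the BLKI2O* ioctl numbers and CACHE_* constants are available
 * from <linux/i2o-dev.h> and that the disk is visible as /dev/i2o/hda (both
 * assumptions, not guaranteed by this file). The "get" calls treat the
 * argument as an int pointer, the "set" calls pass the new strategy as the
 * argument value itself, and CAP_SYS_ADMIN is required:
 *
 *	int strategy, fd = open("/dev/i2o/hda", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, BLKI2OGRSTRAT, &strategy) == 0)
 *		printf("read cache strategy: %d\n", strategy);
 *	if (ioctl(fd, BLKI2OSRSTRAT, CACHE_SMARTFETCH) != 0)
 *		perror("BLKI2OSRSTRAT");
 */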
746
747/**
748 * i2o_block_media_changed - Have we seen a media change?
749 * @disk: gendisk which should be verified
750 *
751 * Verifies if the media has changed.
752 *
753 * Returns 1 if the media was changed or 0 otherwise.
754 */
755static int i2o_block_media_changed(struct gendisk *disk)
756{
757 struct i2o_block_device *p = disk->private_data;
758
759 if (p->media_change_flag) {
760 p->media_change_flag = 0;
761 return 1;
762 }
763 return 0;
764}
765
766/**
767 * i2o_block_transfer - Transfer a request to/from the I2O controller
768 * @req: the request which should be transferred
769 *
770 * This function converts the request into an I2O message. The necessary
771 * DMA buffers are allocated and, once everything is set up, the message is
772 * posted to the I2O controller. No cleanup is done by this function; that
773 * happens on the interrupt side when the reply arrives.
774 *
775 * Returns 0 on success or a negative error code on failure.
776 */
777static int i2o_block_transfer(struct request *req)
778{
779 struct i2o_block_device *dev = req->rq_disk->private_data;
780 struct i2o_controller *c = dev->i2o_dev->iop;
781 int tid = dev->i2o_dev->lct_data.tid;
782 struct i2o_message __iomem *msg;
783 void __iomem *mptr;
784 struct i2o_block_request *ireq = req->special;
785 struct scatterlist *sg;
786 int sgnum;
787 int i;
788 u32 m;
789 u32 tcntxt;
790 u32 sg_flags;
791 int rc;
792
793 m = i2o_msg_get(c, &msg);
794 if (m == I2O_QUEUE_EMPTY) {
795 rc = -EBUSY;
796 goto exit;
797 }
798
799 tcntxt = i2o_cntxt_list_add(c, req);
800 if (!tcntxt) {
801 rc = -ENOMEM;
802 goto nop_msg;
803 }
804
805 if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
806 rc = -ENOMEM;
807 goto context_remove;
808 }
809
810 /* Build the message based on the request. */
811 writel(i2o_block_driver.context, &msg->u.s.icntxt);
812 writel(tcntxt, &msg->u.s.tcntxt);
813 writel(req->nr_sectors << 9, &msg->body[1]);
814
815 writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
816 writel(req->sector >> 23, &msg->body[3]);
817
818 mptr = &msg->body[4];
819
820 sg = ireq->sg_table;
821
822 if (rq_data_dir(req) == READ) {
823 writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
824 &msg->u.head[1]);
825 sg_flags = 0x10000000;
826 switch (dev->rcache) {
827 case CACHE_NULL:
828 writel(0, &msg->body[0]);
829 break;
830 case CACHE_PREFETCH:
831 writel(0x201F0008, &msg->body[0]);
832 break;
833 case CACHE_SMARTFETCH:
834 if (req->nr_sectors > 16)
835 writel(0x201F0008, &msg->body[0]);
836 else
837 writel(0x001F0000, &msg->body[0]);
838 break;
839 }
840 } else {
841 writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
842 &msg->u.head[1]);
843 sg_flags = 0x14000000;
844 switch (dev->wcache) {
845 case CACHE_NULL:
846 writel(0, &msg->body[0]);
847 break;
848 case CACHE_WRITETHROUGH:
849 writel(0x001F0008, &msg->body[0]);
850 break;
851 case CACHE_WRITEBACK:
852 writel(0x001F0010, &msg->body[0]);
853 break;
854 case CACHE_SMARTBACK:
855 if (req->nr_sectors > 16)
856 writel(0x001F0004, &msg->body[0]);
857 else
858 writel(0x001F0010, &msg->body[0]);
859 break;
860 case CACHE_SMARTTHROUGH:
861 if (req->nr_sectors > 16)
862 writel(0x001F0004, &msg->body[0]);
863 else
864 writel(0x001F0010, &msg->body[0]);
865 }
866 }
867
868 for (i = sgnum; i > 0; i--) {
869 if (i == 1)
870 sg_flags |= 0x80000000;
871 writel(sg_flags | sg_dma_len(sg), mptr);
872 writel(sg_dma_address(sg), mptr + 4);
873 mptr += 8;
874 sg++;
875 }
876
877 writel(I2O_MESSAGE_SIZE
878 (((unsigned long)mptr -
879 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
880 &msg->u.head[0]);
881
882 list_add_tail(&ireq->queue, &dev->open_queue);
883 dev->open_queue_depth++;
884
885 i2o_msg_post(c, m);
886
887 return 0;
888
889 context_remove:
890 i2o_cntxt_list_remove(c, req);
891
892 nop_msg:
893 i2o_msg_nop(c, m);
894
895 exit:
896 return rc;
897};
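/*
 * For reference, the frame built above lays out as follows (as read from the
 * code above, not quoted from the I2O spec): u.head[1] carries the BLOCK_READ
 * or BLOCK_WRITE command with the target TID, body[0] the cache-control flags
 * chosen from dev->rcache/wcache, body[1] the transfer length in bytes
 * (nr_sectors << 9), body[2]/body[3] the low/high halves of the 64-bit byte
 * offset, and the scatter-gather list starts at body[4] with the last element
 * flagged by 0x80000000.
 */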
898
899/**
900 * i2o_block_request_fn - request queue handling function
901 * @q: request queue from which the next request is fetched
902 *
903 * Takes the next request from the queue, transfers it and, if no error
904 * occurs, dequeues it from the queue. The reply is processed further on
905 * arrival. If an error occurs, the request is left on the queue and retried.
906 */
907static void i2o_block_request_fn(struct request_queue *q)
908{
909 struct request *req;
910
911 while (!blk_queue_plugged(q)) {
912 req = elv_next_request(q);
913 if (!req)
914 break;
915
916 if (blk_fs_request(req)) {
917 struct i2o_block_delayed_request *dreq;
918 struct i2o_block_request *ireq = req->special;
919 unsigned int queue_depth;
920
921 queue_depth = ireq->i2o_blk_dev->open_queue_depth;
922
923 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
924 if (!i2o_block_transfer(req)) {
925 blkdev_dequeue_request(req);
926 continue;
927 }
928
929 if (queue_depth)
930 break;
931
932 /* stop the queue and retry later */
933 dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
934 if (!dreq)
935 continue;
936
937 dreq->queue = q;
938 INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
939 dreq);
940
941 osm_info("transfer error\n");
942 if (!queue_delayed_work(i2o_block_driver.event_queue,
943 &dreq->work,
944 I2O_BLOCK_RETRY_TIME))
945 kfree(dreq);
946 else {
947 blk_stop_queue(q);
948 break;
949 }
950 } else
951 end_request(req, 0);
952 }
953};
954
955/* I2O Block device operations definition */
956static struct block_device_operations i2o_block_fops = {
957 .owner = THIS_MODULE,
958 .open = i2o_block_open,
959 .release = i2o_block_release,
960 .ioctl = i2o_block_ioctl,
961 .media_changed = i2o_block_media_changed
962};
963
964/**
965 * i2o_block_device_alloc - Allocate memory for an I2O Block device
966 *
967 * Allocate memory for the i2o_block_device struct, gendisk and request
968 * queue and initialize them as far as possible without extra information.
969 *
970 * Returns a pointer to the allocated I2O Block device on success or a
971 * negative error code on failure.
972 */
973static struct i2o_block_device *i2o_block_device_alloc(void)
974{
975 struct i2o_block_device *dev;
976 struct gendisk *gd;
977 struct request_queue *queue;
978 int rc;
979
980 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
981 if (!dev) {
982 osm_err("Insufficient memory to allocate I2O Block disk.\n");
983 rc = -ENOMEM;
984 goto exit;
985 }
986 memset(dev, 0, sizeof(*dev));
987
988 INIT_LIST_HEAD(&dev->open_queue);
989 spin_lock_init(&dev->lock);
990 dev->rcache = CACHE_PREFETCH;
991 dev->wcache = CACHE_WRITEBACK;
992
993 /* allocate a gendisk with 16 partitions */
994 gd = alloc_disk(16);
995 if (!gd) {
996 osm_err("Insufficient memory to allocate gendisk.\n");
997 rc = -ENOMEM;
998 goto cleanup_dev;
999 }
1000
1001 /* initialize the request queue */
1002 queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
1003 if (!queue) {
1004 osm_err("Insufficient memory to allocate request queue.\n");
1005 rc = -ENOMEM;
1006 goto cleanup_queue;
1007 }
1008
1009 blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
1010
1011 gd->major = I2O_MAJOR;
1012 gd->queue = queue;
1013 gd->fops = &i2o_block_fops;
1014 gd->private_data = dev;
1015
1016 dev->gd = gd;
1017
1018 return dev;
1019
1020 cleanup_queue:
1021 put_disk(gd);
1022
1023 cleanup_dev:
1024 kfree(dev);
1025
1026 exit:
1027 return ERR_PTR(rc);
1028};
1029
1030/**
1031 * i2o_block_probe - verify if dev is an I2O Block device and install it
1032 * @dev: device to verify if it is an I2O Block device
1033 *
1034 * We only verify that the user_tid of the device is 0xfff and then install
1035 * the device. Otherwise it is used by some other device (e.g. RAID).
1036 *
1037 * Returns 0 on success or negative error code on failure.
1038 */
1039static int i2o_block_probe(struct device *dev)
1040{
1041 struct i2o_device *i2o_dev = to_i2o_device(dev);
1042 struct i2o_block_device *i2o_blk_dev;
1043 struct i2o_controller *c = i2o_dev->iop;
1044 struct gendisk *gd;
1045 struct request_queue *queue;
1046 static int unit = 0;
1047 int rc;
1048 u64 size;
1049 u32 blocksize;
1050 u16 power;
1051 u32 flags, status;
1052 int segments;
1053
1054 /* skip devices which are used by IOP */
1055 if (i2o_dev->lct_data.user_tid != 0xfff) {
1056 osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
1057 return -ENODEV;
1058 }
1059
1060 osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid);
1061
1062 if (i2o_device_claim(i2o_dev)) {
1063 osm_warn("Unable to claim device. Installation aborted\n");
1064 rc = -EFAULT;
1065 goto exit;
1066 }
1067
1068 i2o_blk_dev = i2o_block_device_alloc();
1069 if (IS_ERR(i2o_blk_dev)) {
1070 osm_err("could not alloc a new I2O block device");
1071 rc = PTR_ERR(i2o_blk_dev);
1072 goto claim_release;
1073 }
1074
1075 i2o_blk_dev->i2o_dev = i2o_dev;
1076 dev_set_drvdata(dev, i2o_blk_dev);
1077
1078 /* setup gendisk */
1079 gd = i2o_blk_dev->gd;
1080 gd->first_minor = unit << 4;
1081 sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
1082 sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
1083 gd->driverfs_dev = &i2o_dev->device;
1084
1085 /* setup request queue */
1086 queue = gd->queue;
1087 queue->queuedata = i2o_blk_dev;
1088
1089 blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
1090 blk_queue_max_sectors(queue, I2O_MAX_SECTORS);
1091
1092 if (c->short_req)
1093 segments = 8;
1094 else {
1095 i2o_status_block *sb;
1096
1097 sb = c->status_block.virt;
1098
1099 segments = (sb->inbound_frame_size -
1100 sizeof(struct i2o_message) / 4 - 4) / 2;
1101 }
1102
1103 blk_queue_max_hw_segments(queue, segments);
1104
1105 osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
1106 osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
1107 osm_debug("hw segments = %d\n", segments);
1108
1109 /*
1110 * Ask for the current media data. If that isn't supported
1111 * then we ask for the device capacity data
1112 */
1113 if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
1114 || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
1115 i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
1116 i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
1117 }
1118 osm_debug("blocksize = %d\n", blocksize);
1119
1120 if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
1121 power = 0;
1122 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
1123 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
1124
1125 set_capacity(gd, size >> 9);
1126
1127 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
1128
1129 add_disk(gd);
1130
1131 unit++;
1132
1133 return 0;
1134
1135 claim_release:
1136 i2o_device_claim_release(i2o_dev);
1137
1138 exit:
1139 return rc;
1140};
1141
1142/* Block OSM driver struct */
1143static struct i2o_driver i2o_block_driver = {
1144 .name = OSM_NAME,
1145 .event = i2o_block_event,
1146 .reply = i2o_block_reply,
1147 .classes = i2o_block_class_id,
1148 .driver = {
1149 .probe = i2o_block_probe,
1150 .remove = i2o_block_remove,
1151 },
1152};
1153
1154/**
1155 * i2o_block_init - Block OSM initialization function
1156 *
1157 * Allocates the slab and mempool for request structs, registers the i2o_block
1158 * block device and finally registers the Block OSM with the I2O core.
1159 *
1160 * Returns 0 on success or negative error code on failure.
1161 */
1162static int __init i2o_block_init(void)
1163{
1164 int rc;
1165 int size;
1166
1167 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
1168
1169 /* Allocate request mempool and slab */
1170 size = sizeof(struct i2o_block_request);
1171 i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
1172 SLAB_HWCACHE_ALIGN, NULL,
1173 NULL);
1174 if (!i2o_blk_req_pool.slab) {
1175 osm_err("can't init request slab\n");
1176 rc = -ENOMEM;
1177 goto exit;
1178 }
1179
1180 i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
1181 mempool_alloc_slab,
1182 mempool_free_slab,
1183 i2o_blk_req_pool.slab);
1184 if (!i2o_blk_req_pool.pool) {
1185 osm_err("can't init request mempool\n");
1186 rc = -ENOMEM;
1187 goto free_slab;
1188 }
1189
1190 /* Register the block device interfaces */
1191 rc = register_blkdev(I2O_MAJOR, "i2o_block");
1192 if (rc) {
1193 osm_err("unable to register block device\n");
1194 goto free_mempool;
1195 }
1196#ifdef MODULE
1197 osm_info("registered device at major %d\n", I2O_MAJOR);
1198#endif
1199
1200 /* Register Block OSM into I2O core */
1201 rc = i2o_driver_register(&i2o_block_driver);
1202 if (rc) {
1203 osm_err("Could not register Block driver\n");
1204 goto unregister_blkdev;
1205 }
1206
1207 return 0;
1208
1209 unregister_blkdev:
1210 unregister_blkdev(I2O_MAJOR, "i2o_block");
1211
1212 free_mempool:
1213 mempool_destroy(i2o_blk_req_pool.pool);
1214
1215 free_slab:
1216 kmem_cache_destroy(i2o_blk_req_pool.slab);
1217
1218 exit:
1219 return rc;
1220};
1221
1222/**
1223 * i2o_block_exit - Block OSM exit function
1224 *
1225 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
1226 * and frees the mempool and slab.
1227 */
1228static void __exit i2o_block_exit(void)
1229{
1230 /* Unregister I2O Block OSM from I2O core */
1231 i2o_driver_unregister(&i2o_block_driver);
1232
1233 /* Unregister block device */
1234 unregister_blkdev(I2O_MAJOR, "i2o_block");
1235
1236 /* Free request mempool and slab */
1237 mempool_destroy(i2o_blk_req_pool.pool);
1238 kmem_cache_destroy(i2o_blk_req_pool.slab);
1239};
1240
1241MODULE_AUTHOR("Red Hat");
1242MODULE_LICENSE("GPL");
1243MODULE_DESCRIPTION(OSM_DESCRIPTION);
1244MODULE_VERSION(OSM_VERSION);
1245
1246module_init(i2o_block_init);
1247module_exit(i2o_block_exit);
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
new file mode 100644
index 000000000000..ddd9a15679c0
--- /dev/null
+++ b/drivers/message/i2o/i2o_block.h
@@ -0,0 +1,99 @@
1/*
2 * Block OSM structures/API
3 *
4 * Copyright (C) 1999-2002 Red Hat Software
5 *
6 * Written by Alan Cox, Building Number Three Ltd
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * For the purpose of avoiding doubt the preferred form of the work
19 * for making modifications shall be a standards compliant form such as
20 * gzipped tar and not one requiring a proprietary or patent encumbered
21 * tool to unpack.
22 *
23 * Fixes/additions:
24 * Steve Ralston:
25 * Multiple device handling error fixes,
26 * Added a queue depth.
27 * Alan Cox:
28 *		FC920 has an RMW bug. Don't OR in the end marker.
29 * Removed queue walk, fixed for 64bitness.
30 * Rewrote much of the code over time
31 * Added indirect block lists
32 * Handle 64K limits on many controllers
33 * Don't use indirects on the Promise (breaks)
34 * Heavily chop down the queue depths
35 * Deepak Saxena:
36 * Independent queues per IOP
37 * Support for dynamic device creation/deletion
38 * Code cleanup
39 * Support for larger I/Os through merge* functions
40 * (taken from DAC960 driver)
41 * Boji T Kannanthanam:
42 * Set the I2O Block devices to be detected in increasing
43 * order of TIDs during boot.
44 * Search and set the I2O block device that we boot off
45 * from as the first device to be claimed (as /dev/i2o/hda)
46 * Properly attach/detach I2O gendisk structure from the
47 * system gendisk list. The I2O block devices now appear in
48 * /proc/partitions.
49 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
50 * Minor bugfixes for 2.6.
51 */
52
53#ifndef I2O_BLOCK_OSM_H
54#define I2O_BLOCK_OSM_H
55
56#define I2O_BLOCK_RETRY_TIME HZ/4
57#define I2O_BLOCK_MAX_OPEN_REQUESTS 50
58
59/* I2O Block OSM mempool struct */
60struct i2o_block_mempool {
61 kmem_cache_t *slab;
62 mempool_t *pool;
63};
64
65/* I2O Block device descriptor */
66struct i2o_block_device {
67 struct i2o_device *i2o_dev; /* pointer to I2O device */
68 struct gendisk *gd;
69 spinlock_t lock; /* queue lock */
70	struct list_head open_queue;	/* list of transferred but unfinished
71 requests */
72 unsigned int open_queue_depth; /* number of requests in the queue */
73
74 int rcache; /* read cache flags */
75 int wcache; /* write cache flags */
76 int flags;
77 int power; /* power state */
78 int media_change_flag; /* media changed flag */
79};
80
81/* I2O Block device request */
82struct i2o_block_request
83{
84 struct list_head queue;
85 struct request *req; /* corresponding request */
86 struct i2o_block_device *i2o_blk_dev; /* I2O block device */
87 int sg_dma_direction; /* direction of DMA buffer read/write */
88 int sg_nents; /* number of SG elements */
89 struct scatterlist sg_table[I2O_MAX_SEGMENTS]; /* SG table */
90};
91
92/* I2O Block device delayed request */
93struct i2o_block_delayed_request
94{
95 struct work_struct work;
96 struct request_queue *queue;
97};
98
99#endif
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
new file mode 100644
index 000000000000..5fc5004ea07a
--- /dev/null
+++ b/drivers/message/i2o/i2o_config.c
@@ -0,0 +1,1160 @@
1/*
2 * I2O Configuration Interface Driver
3 *
4 * (C) Copyright 1999-2002 Red Hat
5 *
6 * Written by Alan Cox, Building Number Three Ltd
7 *
8 * Fixes/additions:
9 * Deepak Saxena (04/20/1999):
10 * Added basic ioctl() support
11 * Deepak Saxena (06/07/1999):
12 * Added software download ioctl (still testing)
13 * Auvo Häkkinen (09/10/1999):
14 * Changes to i2o_cfg_reply(), ioctl_parms()
15 *		Added ioctl_validate()
16 * Taneli Vähäkangas (09/30/1999):
17 * Fixed ioctl_swdl()
18 * Taneli Vähäkangas (10/04/1999):
19 * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
20 * Deepak Saxena (11/18/1999):
21 *		Added event management support
22 * Alan Cox <alan@redhat.com>:
23 * 2.4 rewrite ported to 2.5
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Added pass-thru support for Adaptec's raidutils
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
31 */
32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/pci.h>
36#include <linux/i2o.h>
37#include <linux/errno.h>
38#include <linux/init.h>
39#include <linux/slab.h>
40#include <linux/miscdevice.h>
41#include <linux/mm.h>
42#include <linux/spinlock.h>
43#include <linux/smp_lock.h>
44#include <linux/ioctl32.h>
45#include <linux/compat.h>
46#include <linux/syscalls.h>
47
48#include <asm/uaccess.h>
49#include <asm/io.h>
50
51#define OSM_NAME "config-osm"
52#define OSM_VERSION "$Rev$"
53#define OSM_DESCRIPTION "I2O Configuration OSM"
54
55extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
56
57static spinlock_t i2o_config_lock;
58
59#define MODINC(x,y) ((x) = ((x) + 1) % (y))
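/*
 * MODINC implements a wrapping circular-queue index: for example,
 * MODINC(p->q_out, I2O_EVT_Q_LEN) in i2o_cfg_evt_get() below advances the
 * tail one slot and wraps back to 0 at I2O_EVT_Q_LEN, so the per-file
 * event_q[] array is consumed as a ring buffer.
 */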
60
61struct sg_simple_element {
62 u32 flag_count;
63 u32 addr_bus;
64};
65
66struct i2o_cfg_info {
67 struct file *fp;
68 struct fasync_struct *fasync;
69 struct i2o_evt_info event_q[I2O_EVT_Q_LEN];
70 u16 q_in; // Queue head index
71 u16 q_out; // Queue tail index
72 u16 q_len; // Queue length
73 u16 q_lost; // Number of lost events
74 ulong q_id; // Event queue ID...used as tx_context
75 struct i2o_cfg_info *next;
76};
77static struct i2o_cfg_info *open_files = NULL;
78static ulong i2o_cfg_info_id = 0;
79
80/*
81 * Each of these describes an i2o message handler. They are
82 * multiplexed by the i2o_core code
83 */
84
85static struct i2o_driver i2o_config_driver = {
86 .name = OSM_NAME
87};
88
89static int i2o_cfg_getiops(unsigned long arg)
90{
91 struct i2o_controller *c;
92 u8 __user *user_iop_table = (void __user *)arg;
93 u8 tmp[MAX_I2O_CONTROLLERS];
94 int ret = 0;
95
96 memset(tmp, 0, MAX_I2O_CONTROLLERS);
97
98 list_for_each_entry(c, &i2o_controllers, list)
99 tmp[c->unit] = 1;
100
101 if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS))
102 ret = -EFAULT;
103
104 return ret;
105};
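/*
 * Illustrative userspace sketch for the I2OGETIOPS path above, assuming the
 * config OSM's misc device is reachable as /dev/i2o/ctl and that I2OGETIOPS
 * and MAX_I2O_CONTROLLERS come from <linux/i2o-dev.h> (all assumptions, not
 * defined in this file):
 *
 *	unsigned char iops[MAX_I2O_CONTROLLERS];
 *	int i, fd = open("/dev/i2o/ctl", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, I2OGETIOPS, iops) == 0)
 *		for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
 *			if (iops[i])
 *				printf("IOP %d present\n", i);
 */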
106
107static int i2o_cfg_gethrt(unsigned long arg)
108{
109 struct i2o_controller *c;
110 struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
111 struct i2o_cmd_hrtlct kcmd;
112 i2o_hrt *hrt;
113 int len;
114 u32 reslen;
115 int ret = 0;
116
117 if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
118 return -EFAULT;
119
120 if (get_user(reslen, kcmd.reslen) < 0)
121 return -EFAULT;
122
123 if (kcmd.resbuf == NULL)
124 return -EFAULT;
125
126 c = i2o_find_iop(kcmd.iop);
127 if (!c)
128 return -ENXIO;
129
130 hrt = (i2o_hrt *) c->hrt.virt;
131
132 len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
133
134 /* We did a get user...so assuming mem is ok...is this bad? */
135 put_user(len, kcmd.reslen);
136 if (len > reslen)
137 ret = -ENOBUFS;
138 if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
139 ret = -EFAULT;
140
141 return ret;
142};
143
144static int i2o_cfg_getlct(unsigned long arg)
145{
146 struct i2o_controller *c;
147 struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
148 struct i2o_cmd_hrtlct kcmd;
149 i2o_lct *lct;
150 int len;
151 int ret = 0;
152 u32 reslen;
153
154 if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
155 return -EFAULT;
156
157 if (get_user(reslen, kcmd.reslen) < 0)
158 return -EFAULT;
159
160 if (kcmd.resbuf == NULL)
161 return -EFAULT;
162
163 c = i2o_find_iop(kcmd.iop);
164 if (!c)
165 return -ENXIO;
166
167 lct = (i2o_lct *) c->lct;
168
169 len = (unsigned int)lct->table_size << 2;
170 put_user(len, kcmd.reslen);
171 if (len > reslen)
172 ret = -ENOBUFS;
173 else if (copy_to_user(kcmd.resbuf, lct, len))
174 ret = -EFAULT;
175
176 return ret;
177};
178
179static int i2o_cfg_parms(unsigned long arg, unsigned int type)
180{
181 int ret = 0;
182 struct i2o_controller *c;
183 struct i2o_device *dev;
184 struct i2o_cmd_psetget __user *cmd =
185 (struct i2o_cmd_psetget __user *)arg;
186 struct i2o_cmd_psetget kcmd;
187 u32 reslen;
188 u8 *ops;
189 u8 *res;
190 int len = 0;
191
192 u32 i2o_cmd = (type == I2OPARMGET ?
193 I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET);
194
195 if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
196 return -EFAULT;
197
198 if (get_user(reslen, kcmd.reslen))
199 return -EFAULT;
200
201 c = i2o_find_iop(kcmd.iop);
202 if (!c)
203 return -ENXIO;
204
205 dev = i2o_iop_find_device(c, kcmd.tid);
206 if (!dev)
207 return -ENXIO;
208
209 ops = (u8 *) kmalloc(kcmd.oplen, GFP_KERNEL);
210 if (!ops)
211 return -ENOMEM;
212
213 if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
214 kfree(ops);
215 return -EFAULT;
216 }
217
218 /*
219 * It's possible to have a _very_ large table
220 * and that the user asks for all of it at once...
221 */
222 res = (u8 *) kmalloc(65536, GFP_KERNEL);
223 if (!res) {
224 kfree(ops);
225 return -ENOMEM;
226 }
227
228 len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536);
229 kfree(ops);
230
231 if (len < 0) {
232 kfree(res);
233 return -EAGAIN;
234 }
235
236 put_user(len, kcmd.reslen);
237 if (len > reslen)
238 ret = -ENOBUFS;
239 else if (copy_to_user(kcmd.resbuf, res, len))
240 ret = -EFAULT;
241
242 kfree(res);
243
244 return ret;
245};
246
247static int i2o_cfg_swdl(unsigned long arg)
248{
249 struct i2o_sw_xfer kxfer;
250 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
251 unsigned char maxfrag = 0, curfrag = 1;
252 struct i2o_dma buffer;
253 struct i2o_message __iomem *msg;
254 u32 m;
255 unsigned int status = 0, swlen = 0, fragsize = 8192;
256 struct i2o_controller *c;
257
258 if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
259 return -EFAULT;
260
261 if (get_user(swlen, kxfer.swlen) < 0)
262 return -EFAULT;
263
264 if (get_user(maxfrag, kxfer.maxfrag) < 0)
265 return -EFAULT;
266
267 if (get_user(curfrag, kxfer.curfrag) < 0)
268 return -EFAULT;
269
270 if (curfrag == maxfrag)
271 fragsize = swlen - (maxfrag - 1) * 8192;
272
273 if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
274 return -EFAULT;
275
276 c = i2o_find_iop(kxfer.iop);
277 if (!c)
278 return -ENXIO;
279
280 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
281 if (m == I2O_QUEUE_EMPTY)
282 return -EBUSY;
283
284 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
285 i2o_msg_nop(c, m);
286 return -ENOMEM;
287 }
288
289 __copy_from_user(buffer.virt, kxfer.buf, fragsize);
290
291 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
292 writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
293 &msg->u.head[1]);
294 writel(i2o_config_driver.context, &msg->u.head[2]);
295 writel(0, &msg->u.head[3]);
296 writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) |
297 (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]);
298 writel(swlen, &msg->body[1]);
299 writel(kxfer.sw_id, &msg->body[2]);
300 writel(0xD0000000 | fragsize, &msg->body[3]);
301 writel(buffer.phys, &msg->body[4]);
302
303 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
304 status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
305
306 if (status != -ETIMEDOUT)
307 i2o_dma_free(&c->pdev->dev, &buffer);
308
309 if (status != I2O_POST_WAIT_OK) {
310 // it fails if you try and send frags out of order
311 // and for some yet unknown reasons too
312 osm_info("swdl failed, DetailedStatus = %d\n", status);
313 return status;
314 }
315
316 return 0;
317};
318
319static int i2o_cfg_swul(unsigned long arg)
320{
321 struct i2o_sw_xfer kxfer;
322 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
323 unsigned char maxfrag = 0, curfrag = 1;
324 struct i2o_dma buffer;
325 struct i2o_message __iomem *msg;
326 u32 m;
327 unsigned int status = 0, swlen = 0, fragsize = 8192;
328 struct i2o_controller *c;
329 int ret = 0;
330
331 if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
332 goto return_fault;
333
334 if (get_user(swlen, kxfer.swlen) < 0)
335 goto return_fault;
336
337 if (get_user(maxfrag, kxfer.maxfrag) < 0)
338 goto return_fault;
339
340 if (get_user(curfrag, kxfer.curfrag) < 0)
341 goto return_fault;
342
343 if (curfrag == maxfrag)
344 fragsize = swlen - (maxfrag - 1) * 8192;
345
346 if (!kxfer.buf)
347 goto return_fault;
348
349 c = i2o_find_iop(kxfer.iop);
350 if (!c)
351 return -ENXIO;
352
353 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
354 if (m == I2O_QUEUE_EMPTY)
355 return -EBUSY;
356
357 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
358 i2o_msg_nop(c, m);
359 return -ENOMEM;
360 }
361
362 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
363 writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
364 &msg->u.head[1]);
365 writel(i2o_config_driver.context, &msg->u.head[2]);
366 writel(0, &msg->u.head[3]);
367	writel(((u32) kxfer.flags << 24) | ((u32) kxfer.sw_type << 16) |
368	       ((u32) maxfrag << 8) | (u32) curfrag,
369	       &msg->body[0]);
370 writel(swlen, &msg->body[1]);
371 writel(kxfer.sw_id, &msg->body[2]);
372 writel(0xD0000000 | fragsize, &msg->body[3]);
373 writel(buffer.phys, &msg->body[4]);
374
375 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
376 status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
377
378 if (status != I2O_POST_WAIT_OK) {
379 if (status != -ETIMEDOUT)
380 i2o_dma_free(&c->pdev->dev, &buffer);
381
382 osm_info("swul failed, DetailedStatus = %d\n", status);
383 return status;
384 }
385
386 if (copy_to_user(kxfer.buf, buffer.virt, fragsize))
387 ret = -EFAULT;
388
389 i2o_dma_free(&c->pdev->dev, &buffer);
390
391return_ret:
392 return ret;
393return_fault:
394 ret = -EFAULT;
395 goto return_ret;
396};
397
398static int i2o_cfg_swdel(unsigned long arg)
399{
400 struct i2o_controller *c;
401 struct i2o_sw_xfer kxfer;
402 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
403 struct i2o_message __iomem *msg;
404 u32 m;
405 unsigned int swlen;
406 int token;
407
408 if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
409 return -EFAULT;
410
411 if (get_user(swlen, kxfer.swlen) < 0)
412 return -EFAULT;
413
414 c = i2o_find_iop(kxfer.iop);
415 if (!c)
416 return -ENXIO;
417
418 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
419 if (m == I2O_QUEUE_EMPTY)
420 return -EBUSY;
421
422 writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
423 writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID,
424 &msg->u.head[1]);
425 writel(i2o_config_driver.context, &msg->u.head[2]);
426 writel(0, &msg->u.head[3]);
427 writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16,
428 &msg->body[0]);
429 writel(swlen, &msg->body[1]);
430 writel(kxfer.sw_id, &msg->body[2]);
431
432 token = i2o_msg_post_wait(c, m, 10);
433
434 if (token != I2O_POST_WAIT_OK) {
435 osm_info("swdel failed, DetailedStatus = %d\n", token);
436 return -ETIMEDOUT;
437 }
438
439 return 0;
440};
441
442static int i2o_cfg_validate(unsigned long arg)
443{
444 int token;
445 int iop = (int)arg;
446 struct i2o_message __iomem *msg;
447 u32 m;
448 struct i2o_controller *c;
449
450 c = i2o_find_iop(iop);
451 if (!c)
452 return -ENXIO;
453
454 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
455 if (m == I2O_QUEUE_EMPTY)
456 return -EBUSY;
457
458 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
459 writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop,
460 &msg->u.head[1]);
461 writel(i2o_config_driver.context, &msg->u.head[2]);
462 writel(0, &msg->u.head[3]);
463
464 token = i2o_msg_post_wait(c, m, 10);
465
466 if (token != I2O_POST_WAIT_OK) {
467 osm_info("Can't validate configuration, ErrorStatus = %d\n",
468 token);
469 return -ETIMEDOUT;
470 }
471
472 return 0;
473};
474
475static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
476{
477 struct i2o_message __iomem *msg;
478 u32 m;
479 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
480 struct i2o_evt_id kdesc;
481 struct i2o_controller *c;
482 struct i2o_device *d;
483
484 if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
485 return -EFAULT;
486
487 /* IOP exists? */
488 c = i2o_find_iop(kdesc.iop);
489 if (!c)
490 return -ENXIO;
491
492 /* Device exists? */
493 d = i2o_iop_find_device(c, kdesc.tid);
494 if (!d)
495 return -ENODEV;
496
497 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
498 if (m == I2O_QUEUE_EMPTY)
499 return -EBUSY;
500
501 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
502 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid,
503 &msg->u.head[1]);
504 writel(i2o_config_driver.context, &msg->u.head[2]);
505 writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]);
506 writel(kdesc.evt_mask, &msg->body[0]);
507
508 i2o_msg_post(c, m);
509
510 return 0;
511}
512
513static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
514{
515 struct i2o_cfg_info *p = NULL;
516 struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg;
517 struct i2o_evt_get kget;
518 unsigned long flags;
519
520 for (p = open_files; p; p = p->next)
521 if (p->q_id == (ulong) fp->private_data)
522 break;
523
524	if (!p || !p->q_len)
525 return -ENOENT;
526
527 memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
528 MODINC(p->q_out, I2O_EVT_Q_LEN);
529 spin_lock_irqsave(&i2o_config_lock, flags);
530 p->q_len--;
531 kget.pending = p->q_len;
532 kget.lost = p->q_lost;
533 spin_unlock_irqrestore(&i2o_config_lock, flags);
534
535 if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
536 return -EFAULT;
537 return 0;
538}
539
540#ifdef CONFIG_COMPAT
541static int i2o_cfg_passthru32(unsigned fd, unsigned cmnd, unsigned long arg,
542 struct file *file)
543{
544 struct i2o_cmd_passthru32 __user *cmd;
545 struct i2o_controller *c;
546 u32 __user *user_msg;
547 u32 *reply = NULL;
548 u32 __user *user_reply = NULL;
549 u32 size = 0;
550 u32 reply_size = 0;
551 u32 rcode = 0;
552 struct i2o_dma sg_list[SG_TABLESIZE];
553 u32 sg_offset = 0;
554 u32 sg_count = 0;
555 u32 i = 0;
556 i2o_status_block *sb;
557 struct i2o_message *msg;
558 u32 m;
559 unsigned int iop;
560
561 cmd = (struct i2o_cmd_passthru32 __user *)arg;
562
563 if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg))
564 return -EFAULT;
565
566 user_msg = compat_ptr(i);
567
568 c = i2o_find_iop(iop);
569 if (!c) {
570 osm_debug("controller %d not found\n", iop);
571 return -ENXIO;
572 }
573
574 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
575
576 sb = c->status_block.virt;
577
578 if (get_user(size, &user_msg[0])) {
579 osm_warn("unable to get size!\n");
580 return -EFAULT;
581 }
582 size = size >> 16;
583
584 if (size > sb->inbound_frame_size) {
585		osm_warn("size of message > inbound_frame_size\n");
586 return -EFAULT;
587 }
588
589 user_reply = &user_msg[size];
590
591 size <<= 2; // Convert to bytes
592
593 /* Copy in the user's I2O command */
594 if (copy_from_user(msg, user_msg, size)) {
595 osm_warn("unable to copy user message\n");
596 return -EFAULT;
597 }
598 i2o_dump_message(msg);
599
600 if (get_user(reply_size, &user_reply[0]) < 0)
601 return -EFAULT;
602
603 reply_size >>= 16;
604 reply_size <<= 2;
605
606 reply = kmalloc(reply_size, GFP_KERNEL);
607 if (!reply) {
608 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
609 c->name);
610 return -ENOMEM;
611 }
612 memset(reply, 0, reply_size);
613
614 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
615
616 writel(i2o_config_driver.context, &msg->u.s.icntxt);
617 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
618
619 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
620 if (sg_offset) {
621 struct sg_simple_element *sg;
622
623 if (sg_offset * 4 >= size) {
624 rcode = -EFAULT;
625 goto cleanup;
626 }
627 // TODO 64bit fix
628 sg = (struct sg_simple_element *)((&msg->u.head[0]) +
629 sg_offset);
630 sg_count =
631 (size - sg_offset * 4) / sizeof(struct sg_simple_element);
632 if (sg_count > SG_TABLESIZE) {
633 printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
634 c->name, sg_count);
635 kfree(reply);
636 return -EINVAL;
637 }
638
639 for (i = 0; i < sg_count; i++) {
640 int sg_size;
641 struct i2o_dma *p;
642
643 if (!(sg[i].flag_count & 0x10000000
644 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
645 printk(KERN_DEBUG
646 "%s:Bad SG element %d - not simple (%x)\n",
647 c->name, i, sg[i].flag_count);
648 rcode = -EINVAL;
649 goto cleanup;
650 }
651 sg_size = sg[i].flag_count & 0xffffff;
652 p = &(sg_list[i]);
653 /* Allocate memory for the transfer */
654 if (i2o_dma_alloc
655 (&c->pdev->dev, p, sg_size,
656 PCI_DMA_BIDIRECTIONAL)) {
657 printk(KERN_DEBUG
658 "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
659 c->name, sg_size, i, sg_count);
660 rcode = -ENOMEM;
661 goto cleanup;
662 }
663 /* Copy in the user's SG buffer if necessary */
664			if (sg[i].flag_count &
665			    0x04000000 /* I2O_SGL_FLAGS_DIR */ ) {
666 // TODO 64bit fix
667 if (copy_from_user
668 (p->virt, (void __user *)(unsigned long)sg[i].addr_bus,
669 sg_size)) {
670 printk(KERN_DEBUG
671 "%s: Could not copy SG buf %d FROM user\n",
672 c->name, i);
673 rcode = -EFAULT;
674 goto cleanup;
675 }
676 }
677 //TODO 64bit fix
678 sg[i].addr_bus = (u32) p->phys;
679 }
680 }
681
682 rcode = i2o_msg_post_wait(c, m, 60);
683 if (rcode)
684 goto cleanup;
685
686 if (sg_offset) {
687 u32 msg[128];
688 /* Copy back the Scatter Gather buffers back to user space */
689 u32 j;
690 // TODO 64bit fix
691 struct sg_simple_element *sg;
692 int sg_size;
693
694 // re-acquire the original message to handle correctly the sg copy operation
695 memset(&msg, 0, MSG_FRAME_SIZE * 4);
696 // get user msg size in u32s
697 if (get_user(size, &user_msg[0])) {
698 rcode = -EFAULT;
699 goto cleanup;
700 }
701 size = size >> 16;
702 size *= 4;
703 /* Copy in the user's I2O command */
704 if (copy_from_user(msg, user_msg, size)) {
705 rcode = -EFAULT;
706 goto cleanup;
707 }
708 sg_count =
709 (size - sg_offset * 4) / sizeof(struct sg_simple_element);
710
711 // TODO 64bit fix
712 sg = (struct sg_simple_element *)(msg + sg_offset);
713 for (j = 0; j < sg_count; j++) {
714 /* Copy out the SG list to user's buffer if necessary */
715 if (!
716 (sg[j].
717 flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
718 sg_size = sg[j].flag_count & 0xffffff;
719 // TODO 64bit fix
720 if (copy_to_user
721 ((void __user *)(u64) sg[j].addr_bus,
722 sg_list[j].virt, sg_size)) {
723 printk(KERN_WARNING
724 "%s: Could not copy %p TO user %x\n",
725 c->name, sg_list[j].virt,
726 sg[j].addr_bus);
727 rcode = -EFAULT;
728 goto cleanup;
729 }
730 }
731 }
732 }
733
734 /* Copy back the reply to user space */
735 if (reply_size) {
736 // we wrote our own values for context - now restore the user supplied ones
737 if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
738 printk(KERN_WARNING
739 "%s: Could not copy message context FROM user\n",
740 c->name);
741 rcode = -EFAULT;
742 }
743 if (copy_to_user(user_reply, reply, reply_size)) {
744 printk(KERN_WARNING
745 "%s: Could not copy reply TO user\n", c->name);
746 rcode = -EFAULT;
747 }
748 }
749
750 cleanup:
751 kfree(reply);
752 return rcode;
753}
754
755#else
756
757static int i2o_cfg_passthru(unsigned long arg)
758{
759 struct i2o_cmd_passthru __user *cmd =
760 (struct i2o_cmd_passthru __user *)arg;
761 struct i2o_controller *c;
762 u32 __user *user_msg;
763 u32 *reply = NULL;
764 u32 __user *user_reply = NULL;
765 u32 size = 0;
766 u32 reply_size = 0;
767 u32 rcode = 0;
768 void *sg_list[SG_TABLESIZE];
769 u32 sg_offset = 0;
770 u32 sg_count = 0;
771 int sg_index = 0;
772 u32 i = 0;
773 void *p = NULL;
774 i2o_status_block *sb;
775 struct i2o_message __iomem *msg;
776 u32 m;
777 unsigned int iop;
778
779 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
780 return -EFAULT;
781
782 c = i2o_find_iop(iop);
783 if (!c) {
784 osm_warn("controller %d not found\n", iop);
785 return -ENXIO;
786 }
787
788 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
789
790 sb = c->status_block.virt;
791
792 if (get_user(size, &user_msg[0]))
793 return -EFAULT;
794 size = size >> 16;
795
796 if (size > sb->inbound_frame_size) {
797		osm_warn("size of message > inbound_frame_size\n");
798 return -EFAULT;
799 }
800
801 user_reply = &user_msg[size];
802
803 size <<= 2; // Convert to bytes
804
805 /* Copy in the user's I2O command */
806 if (copy_from_user(msg, user_msg, size))
807 return -EFAULT;
808
809 if (get_user(reply_size, &user_reply[0]) < 0)
810 return -EFAULT;
811
812 reply_size >>= 16;
813 reply_size <<= 2;
814
815 reply = kmalloc(reply_size, GFP_KERNEL);
816 if (!reply) {
817 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
818 c->name);
819 return -ENOMEM;
820 }
821 memset(reply, 0, reply_size);
822
823 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
824
825 writel(i2o_config_driver.context, &msg->u.s.icntxt);
826 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
827
828 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
829 if (sg_offset) {
830 struct sg_simple_element *sg;
831
832 if (sg_offset * 4 >= size) {
833 rcode = -EFAULT;
834 goto cleanup;
835 }
836 // TODO 64bit fix
837 sg = (struct sg_simple_element *)((&msg->u.head[0]) +
838 sg_offset);
839 sg_count =
840 (size - sg_offset * 4) / sizeof(struct sg_simple_element);
841 if (sg_count > SG_TABLESIZE) {
842 printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
843 c->name, sg_count);
844 kfree(reply);
845 return -EINVAL;
846 }
847
848 for (i = 0; i < sg_count; i++) {
849 int sg_size;
850
851 if (!(sg[i].flag_count & 0x10000000
852 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
853 printk(KERN_DEBUG
854 "%s:Bad SG element %d - not simple (%x)\n",
855 c->name, i, sg[i].flag_count);
856 rcode = -EINVAL;
857 goto cleanup;
858 }
859 sg_size = sg[i].flag_count & 0xffffff;
860 /* Allocate memory for the transfer */
861 p = kmalloc(sg_size, GFP_KERNEL);
862 if (!p) {
863 printk(KERN_DEBUG
864 "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
865 c->name, sg_size, i, sg_count);
866 rcode = -ENOMEM;
867 goto cleanup;
868 }
869 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
870 /* Copy in the user's SG buffer if necessary */
871			if (sg[i].flag_count &
872			    0x04000000 /* I2O_SGL_FLAGS_DIR */ ) {
873 // TODO 64bit fix
874 if (copy_from_user
875 (p, (void __user *)sg[i].addr_bus,
876 sg_size)) {
877 printk(KERN_DEBUG
878 "%s: Could not copy SG buf %d FROM user\n",
879 c->name, i);
880 rcode = -EFAULT;
881 goto cleanup;
882 }
883 }
884 //TODO 64bit fix
885 sg[i].addr_bus = virt_to_bus(p);
886 }
887 }
888
889 rcode = i2o_msg_post_wait(c, m, 60);
890 if (rcode)
891 goto cleanup;
892
893 if (sg_offset) {
894 u32 msg[128];
895 /* Copy back the Scatter Gather buffers back to user space */
896 u32 j;
897 // TODO 64bit fix
898 struct sg_simple_element *sg;
899 int sg_size;
900
901 // re-acquire the original message to handle correctly the sg copy operation
902 memset(&msg, 0, MSG_FRAME_SIZE * 4);
903 // get user msg size in u32s
904 if (get_user(size, &user_msg[0])) {
905 rcode = -EFAULT;
906 goto cleanup;
907 }
908 size = size >> 16;
909 size *= 4;
910 /* Copy in the user's I2O command */
911 if (copy_from_user(msg, user_msg, size)) {
912 rcode = -EFAULT;
913 goto cleanup;
914 }
915 sg_count =
916 (size - sg_offset * 4) / sizeof(struct sg_simple_element);
917
918 // TODO 64bit fix
919 sg = (struct sg_simple_element *)(msg + sg_offset);
920 for (j = 0; j < sg_count; j++) {
921 /* Copy out the SG list to user's buffer if necessary */
922 if (!
923 (sg[j].
924 flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
925 sg_size = sg[j].flag_count & 0xffffff;
926 // TODO 64bit fix
927 if (copy_to_user
928 ((void __user *)sg[j].addr_bus, sg_list[j],
929 sg_size)) {
930 printk(KERN_WARNING
931 "%s: Could not copy %p TO user %x\n",
932 c->name, sg_list[j],
933 sg[j].addr_bus);
934 rcode = -EFAULT;
935 goto cleanup;
936 }
937 }
938 }
939 }
940
941 /* Copy back the reply to user space */
942 if (reply_size) {
943 // we wrote our own values for context - now restore the user supplied ones
944 if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
945 printk(KERN_WARNING
946 "%s: Could not copy message context FROM user\n",
947 c->name);
948 rcode = -EFAULT;
949 }
950 if (copy_to_user(user_reply, reply, reply_size)) {
951 printk(KERN_WARNING
952 "%s: Could not copy reply TO user\n", c->name);
953 rcode = -EFAULT;
954 }
955 }
956
957 cleanup:
958 kfree(reply);
959 return rcode;
960}
961#endif
962
963/*
964 * IOCTL Handler
965 */
966static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
967 unsigned long arg)
968{
969 int ret;
970
971 switch (cmd) {
972 case I2OGETIOPS:
973 ret = i2o_cfg_getiops(arg);
974 break;
975
976 case I2OHRTGET:
977 ret = i2o_cfg_gethrt(arg);
978 break;
979
980 case I2OLCTGET:
981 ret = i2o_cfg_getlct(arg);
982 break;
983
984 case I2OPARMSET:
985 ret = i2o_cfg_parms(arg, I2OPARMSET);
986 break;
987
988 case I2OPARMGET:
989 ret = i2o_cfg_parms(arg, I2OPARMGET);
990 break;
991
992 case I2OSWDL:
993 ret = i2o_cfg_swdl(arg);
994 break;
995
996 case I2OSWUL:
997 ret = i2o_cfg_swul(arg);
998 break;
999
1000 case I2OSWDEL:
1001 ret = i2o_cfg_swdel(arg);
1002 break;
1003
1004 case I2OVALIDATE:
1005 ret = i2o_cfg_validate(arg);
1006 break;
1007
1008 case I2OEVTREG:
1009 ret = i2o_cfg_evt_reg(arg, fp);
1010 break;
1011
1012 case I2OEVTGET:
1013 ret = i2o_cfg_evt_get(arg, fp);
1014 break;
1015
1016#ifndef CONFIG_COMPAT
1017 case I2OPASSTHRU:
1018 ret = i2o_cfg_passthru(arg);
1019 break;
1020#endif
1021
1022 default:
1023 osm_debug("unknown ioctl called!\n");
1024 ret = -EINVAL;
1025 }
1026
1027 return ret;
1028}
1029
1030static int cfg_open(struct inode *inode, struct file *file)
1031{
1032 struct i2o_cfg_info *tmp =
1033 (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info),
1034 GFP_KERNEL);
1035 unsigned long flags;
1036
1037 if (!tmp)
1038 return -ENOMEM;
1039
1040 file->private_data = (void *)(i2o_cfg_info_id++);
1041 tmp->fp = file;
1042 tmp->fasync = NULL;
1043 tmp->q_id = (ulong) file->private_data;
1044 tmp->q_len = 0;
1045 tmp->q_in = 0;
1046 tmp->q_out = 0;
1047 tmp->q_lost = 0;
1048 tmp->next = open_files;
1049
1050 spin_lock_irqsave(&i2o_config_lock, flags);
1051 open_files = tmp;
1052 spin_unlock_irqrestore(&i2o_config_lock, flags);
1053
1054 return 0;
1055}
1056
1057static int cfg_fasync(int fd, struct file *fp, int on)
1058{
1059 ulong id = (ulong) fp->private_data;
1060 struct i2o_cfg_info *p;
1061
1062 for (p = open_files; p; p = p->next)
1063 if (p->q_id == id)
1064 break;
1065
1066 if (!p)
1067 return -EBADF;
1068
1069 return fasync_helper(fd, fp, on, &p->fasync);
1070}
1071
1072static int cfg_release(struct inode *inode, struct file *file)
1073{
1074 ulong id = (ulong) file->private_data;
1075 struct i2o_cfg_info *p1, *p2;
1076 unsigned long flags;
1077
1078 lock_kernel();
1079 p1 = p2 = NULL;
1080
1081 spin_lock_irqsave(&i2o_config_lock, flags);
1082 for (p1 = open_files; p1;) {
1083 if (p1->q_id == id) {
1084
1085 if (p1->fasync)
1086 cfg_fasync(-1, file, 0);
1087 if (p2)
1088 p2->next = p1->next;
1089 else
1090 open_files = p1->next;
1091
1092 kfree(p1);
1093 break;
1094 }
1095 p2 = p1;
1096 p1 = p1->next;
1097 }
1098 spin_unlock_irqrestore(&i2o_config_lock, flags);
1099 unlock_kernel();
1100
1101 return 0;
1102}
1103
1104static struct file_operations config_fops = {
1105 .owner = THIS_MODULE,
1106 .llseek = no_llseek,
1107 .ioctl = i2o_cfg_ioctl,
1108 .open = cfg_open,
1109 .release = cfg_release,
1110 .fasync = cfg_fasync,
1111};
1112
1113static struct miscdevice i2o_miscdev = {
1114 I2O_MINOR,
1115 "i2octl",
1116 &config_fops
1117};
1118
1119static int __init i2o_config_init(void)
1120{
1121 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
1122
1123 spin_lock_init(&i2o_config_lock);
1124
1125 if (misc_register(&i2o_miscdev) < 0) {
1126 osm_err("can't register device.\n");
1127 return -EBUSY;
1128 }
1129 /*
1130 * Install our handler
1131 */
1132 if (i2o_driver_register(&i2o_config_driver)) {
1133 osm_err("handler register failed.\n");
1134 misc_deregister(&i2o_miscdev);
1135 return -EBUSY;
1136 }
1137#ifdef CONFIG_COMPAT
1138 register_ioctl32_conversion(I2OPASSTHRU32, i2o_cfg_passthru32);
1139 register_ioctl32_conversion(I2OGETIOPS, (void *)sys_ioctl);
1140#endif
1141 return 0;
1142}
1143
1144static void i2o_config_exit(void)
1145{
1146#ifdef CONFIG_COMPAT
1147 unregister_ioctl32_conversion(I2OPASSTHRU32);
1148 unregister_ioctl32_conversion(I2OGETIOPS);
1149#endif
1150 misc_deregister(&i2o_miscdev);
1151 i2o_driver_unregister(&i2o_config_driver);
1152}
1153
1154MODULE_AUTHOR("Red Hat Software");
1155MODULE_LICENSE("GPL");
1156MODULE_DESCRIPTION(OSM_DESCRIPTION);
1157MODULE_VERSION(OSM_VERSION);
1158
1159module_init(i2o_config_init);
1160module_exit(i2o_config_exit);
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h
new file mode 100644
index 000000000000..561d63304d7e
--- /dev/null
+++ b/drivers/message/i2o/i2o_lan.h
@@ -0,0 +1,159 @@
1/*
2 * i2o_lan.h I2O LAN Class definitions
3 *
4 * I2O LAN CLASS OSM May 26th 2000
5 *
6 * (C) Copyright 1999, 2000 University of Helsinki,
7 * Department of Computer Science
8 *
9 * This code is still under development / test.
10 *
11 * Author: Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
12 * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
13 * Taneli Vähäkangas <Taneli.Vahakangas@cs.Helsinki.FI>
14 */
15
16#ifndef _I2O_LAN_H
17#define _I2O_LAN_H
18
19/* Default values for tunable parameters first */
20
21#define I2O_LAN_MAX_BUCKETS_OUT 96
22#define I2O_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */
23#define I2O_LAN_RX_COPYBREAK 200
24#define I2O_LAN_TX_TIMEOUT (1*HZ)
25#define I2O_LAN_TX_BATCH_MODE 2 /* 2=automatic, 1=on, 0=off */
26#define I2O_LAN_EVENT_MASK 0 /* 0=None, 0xFFC00002=All */
27
28/* LAN types */
29#define I2O_LAN_ETHERNET 0x0030
30#define I2O_LAN_100VG 0x0040
31#define I2O_LAN_TR 0x0050
32#define I2O_LAN_FDDI 0x0060
33#define I2O_LAN_FIBRE_CHANNEL 0x0070
34#define I2O_LAN_UNKNOWN 0x00000000
35
36/* Connector types */
37
38/* Ethernet */
39#define I2O_LAN_AUI (I2O_LAN_ETHERNET << 4) + 0x00000001
40#define I2O_LAN_10BASE5 (I2O_LAN_ETHERNET << 4) + 0x00000002
41#define I2O_LAN_FIORL (I2O_LAN_ETHERNET << 4) + 0x00000003
42#define I2O_LAN_10BASE2 (I2O_LAN_ETHERNET << 4) + 0x00000004
43#define I2O_LAN_10BROAD36 (I2O_LAN_ETHERNET << 4) + 0x00000005
44#define I2O_LAN_10BASE_T (I2O_LAN_ETHERNET << 4) + 0x00000006
45#define I2O_LAN_10BASE_FP (I2O_LAN_ETHERNET << 4) + 0x00000007
46#define I2O_LAN_10BASE_FB (I2O_LAN_ETHERNET << 4) + 0x00000008
47#define I2O_LAN_10BASE_FL (I2O_LAN_ETHERNET << 4) + 0x00000009
48#define I2O_LAN_100BASE_TX (I2O_LAN_ETHERNET << 4) + 0x0000000A
49#define I2O_LAN_100BASE_FX (I2O_LAN_ETHERNET << 4) + 0x0000000B
50#define I2O_LAN_100BASE_T4 (I2O_LAN_ETHERNET << 4) + 0x0000000C
51#define I2O_LAN_1000BASE_SX (I2O_LAN_ETHERNET << 4) + 0x0000000D
52#define I2O_LAN_1000BASE_LX (I2O_LAN_ETHERNET << 4) + 0x0000000E
53#define I2O_LAN_1000BASE_CX (I2O_LAN_ETHERNET << 4) + 0x0000000F
54#define I2O_LAN_1000BASE_T (I2O_LAN_ETHERNET << 4) + 0x00000010
55
56/* AnyLAN */
57#define I2O_LAN_100VG_ETHERNET (I2O_LAN_100VG << 4) + 0x00000001
58#define I2O_LAN_100VG_TR (I2O_LAN_100VG << 4) + 0x00000002
59
60/* Token Ring */
61#define I2O_LAN_4MBIT (I2O_LAN_TR << 4) + 0x00000001
62#define I2O_LAN_16MBIT (I2O_LAN_TR << 4) + 0x00000002
63
64/* FDDI */
65#define I2O_LAN_125MBAUD (I2O_LAN_FDDI << 4) + 0x00000001
66
67/* Fibre Channel */
68#define I2O_LAN_POINT_POINT (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000001
69#define I2O_LAN_ARB_LOOP (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000002
70#define I2O_LAN_PUBLIC_LOOP (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000003
71#define I2O_LAN_FABRIC (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000004
72
73#define I2O_LAN_EMULATION 0x00000F00
74#define I2O_LAN_OTHER 0x00000F01
75#define I2O_LAN_DEFAULT 0xFFFFFFFF
76
77/* LAN class functions */
78
79#define LAN_PACKET_SEND 0x3B
80#define LAN_SDU_SEND 0x3D
81#define LAN_RECEIVE_POST 0x3E
82#define LAN_RESET 0x35
83#define LAN_SUSPEND 0x37
84
85/* LAN DetailedStatusCode defines */
86#define I2O_LAN_DSC_SUCCESS 0x00
87#define I2O_LAN_DSC_DEVICE_FAILURE 0x01
88#define I2O_LAN_DSC_DESTINATION_NOT_FOUND 0x02
89#define I2O_LAN_DSC_TRANSMIT_ERROR 0x03
90#define I2O_LAN_DSC_TRANSMIT_ABORTED 0x04
91#define I2O_LAN_DSC_RECEIVE_ERROR 0x05
92#define I2O_LAN_DSC_RECEIVE_ABORTED 0x06
93#define I2O_LAN_DSC_DMA_ERROR 0x07
94#define I2O_LAN_DSC_BAD_PACKET_DETECTED 0x08
95#define I2O_LAN_DSC_OUT_OF_MEMORY 0x09
96#define I2O_LAN_DSC_BUCKET_OVERRUN 0x0A
97#define I2O_LAN_DSC_IOP_INTERNAL_ERROR 0x0B
98#define I2O_LAN_DSC_CANCELED 0x0C
99#define I2O_LAN_DSC_INVALID_TRANSACTION_CONTEXT 0x0D
100#define I2O_LAN_DSC_DEST_ADDRESS_DETECTED 0x0E
101#define I2O_LAN_DSC_DEST_ADDRESS_OMITTED 0x0F
102#define I2O_LAN_DSC_PARTIAL_PACKET_RETURNED 0x10
103#define I2O_LAN_DSC_SUSPENDED 0x11
104
105struct i2o_packet_info {
106 u32 offset : 24;
107 u32 flags : 8;
108 u32 len : 24;
109 u32 status : 8;
110};
111
112struct i2o_bucket_descriptor {
113 u32 context; /* FIXME: 64bit support */
114 struct i2o_packet_info packet_info[1];
115};
116
117/* Event Indicator Mask Flags for LAN OSM */
118
119#define I2O_LAN_EVT_LINK_DOWN 0x01
120#define I2O_LAN_EVT_LINK_UP 0x02
121#define I2O_LAN_EVT_MEDIA_CHANGE 0x04
122
123#include <linux/netdevice.h>
124#include <linux/fddidevice.h>
125
126struct i2o_lan_local {
127 u8 unit;
128 struct i2o_device *i2o_dev;
129
130 struct fddi_statistics stats; /* see also struct net_device_stats */
131 unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
132 atomic_t buckets_out; /* nbr of unused buckets on DDM */
133 atomic_t tx_out; /* outstanding TXes */
134 u8 tx_count; /* packets in one TX message frame */
135 u16 tx_max_out; /* DDM's Tx queue len */
136 u8 sgl_max; /* max SGLs in one message frame */
137 u32 m; /* IOP address of the batch msg frame */
138
139 struct work_struct i2o_batch_send_task;
140 int send_active;
141 struct sk_buff **i2o_fbl; /* Free bucket list (to reuse skbs) */
142 int i2o_fbl_tail;
143 spinlock_t fbl_lock;
144
145 spinlock_t tx_lock;
146
147 u32 max_size_mc_table; /* max number of multicast addresses */
148
149 /* LAN OSM configurable parameters are here: */
150
151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */
152 u16 bucket_thresh; /* send more when this many used */
153 u16 rx_copybreak;
154
155 u8 tx_batch_mode; /* Set when using batch mode sends */
156 u32 i2o_event_mask; /* To turn on interesting event flags */
157};
158
159#endif /* _I2O_LAN_H */
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
new file mode 100644
index 000000000000..b176d0eeff7f
--- /dev/null
+++ b/drivers/message/i2o/i2o_proc.c
@@ -0,0 +1,2112 @@
1/*
2 * procfs handler for Linux I2O subsystem
3 *
4 * (c) Copyright 1999 Deepak Saxena
5 *
6 * Originally written by Deepak Saxena(deepak@plexity.net)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This is an initial test release. The code is based on the design of the
14 * ide procfs system (drivers/block/ide-proc.c). Some code taken from
15 * i2o-core module by Alan Cox.
16 *
17 * DISCLAIMER: This code is still under development/test and may cause
18 * your system to behave unpredictably. Use at your own discretion.
19 *
20 *
21 * Fixes/additions:
22 * Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI),
23 * Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI)
24 * University of Helsinki, Department of Computer Science
25 * LAN entries
26 * Markus Lidel <Markus.Lidel@shadowconnect.com>
27 * Changes for new I2O API
28 */
29
30#define OSM_NAME "proc-osm"
31#define OSM_VERSION "$Rev$"
32#define OSM_DESCRIPTION "I2O ProcFS OSM"
33
34#define I2O_MAX_MODULES 4
35// FIXME!
36#define FMT_U64_HEX "0x%08x%08x"
37#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64))
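/*
 * FMT_U64_HEX/U64_VAL print a 64-bit value as two 32-bit halves; the half
 * ordering assumes a little-endian layout, which is what the FIXME above is
 * about.  Typical use (illustrative):
 *
 *	u64 val;
 *	seq_printf(seq, "Value: " FMT_U64_HEX "\n", U64_VAL(&val));
 */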
38
39#include <linux/types.h>
40#include <linux/kernel.h>
41#include <linux/pci.h>
42#include <linux/i2o.h>
43#include <linux/proc_fs.h>
44#include <linux/seq_file.h>
45#include <linux/init.h>
46#include <linux/module.h>
47#include <linux/errno.h>
48#include <linux/spinlock.h>
49#include <linux/workqueue.h>
50
51#include <asm/io.h>
52#include <asm/uaccess.h>
53#include <asm/byteorder.h>
54
55/* Structure used to define /proc entries */
56typedef struct _i2o_proc_entry_t {
57 char *name; /* entry name */
58 mode_t mode; /* mode */
59 struct file_operations *fops; /* open function */
60} i2o_proc_entry;
61
62/* global I2O /proc/i2o entry */
63static struct proc_dir_entry *i2o_proc_dir_root;
64
65/* proc OSM driver struct */
66static struct i2o_driver i2o_proc_driver = {
67 .name = OSM_NAME,
68};
69
70static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
71{
72 int i;
73
74 /* 19990419 -sralston
75 * The I2O v1.5 (and v2.0 so far) "official specification"
76 * got serial numbers WRONG!
77 * Apparently, and despite what Section 3.4.4 says and
78 * Figure 3-35 shows (pg 3-39 in the pdf doc),
79 * the convention / consensus seems to be:
80 * + First byte is SNFormat
81 * + Second byte is SNLen (but only if SNFormat==7 (?))
82 * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format
83 */
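	/*
	 * Illustrative buffer layout following that convention:
	 *   serialno[0]            -> SNFormat selector (drives the switch below)
	 *   serialno[1]            -> SNLen, number of payload bytes
	 *   serialno[2..2+SNLen-1] -> the serial number payload itself
	 */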
84 switch (serialno[0]) {
85 case I2O_SNFORMAT_BINARY: /* Binary */
86 seq_printf(seq, "0x");
87 for (i = 0; i < serialno[1]; i++) {
88 seq_printf(seq, "%02X", serialno[2 + i]);
89 }
90 break;
91
92 case I2O_SNFORMAT_ASCII: /* ASCII */
93 if (serialno[1] < ' ') { /* printable or SNLen? */
94 /* sanity */
95 max_len =
96 (max_len < serialno[1]) ? max_len : serialno[1];
97 serialno[1 + max_len] = '\0';
98
99 /* just print it */
100 seq_printf(seq, "%s", &serialno[2]);
101 } else {
102 /* print chars for specified length */
103 for (i = 0; i < serialno[1]; i++) {
104 seq_printf(seq, "%c", serialno[2 + i]);
105 }
106 }
107 break;
108
109 case I2O_SNFORMAT_UNICODE: /* UNICODE */
110 seq_printf(seq, "UNICODE Format. Can't Display\n");
111 break;
112
113 case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
114 seq_printf(seq,
115 "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X",
116 serialno[2], serialno[3],
117 serialno[4], serialno[5], serialno[6], serialno[7]);
118 break;
119
120 case I2O_SNFORMAT_WAN: /* WAN MAC Address */
121 /* FIXME: Figure out what a WAN access address looks like?? */
122 seq_printf(seq, "WAN Access Address");
123 break;
124
125/* plus new in v2.0 */
126 case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
127 /* FIXME: Figure out what a LAN-64 address really looks like?? */
128 seq_printf(seq,
129 "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X",
130 serialno[8], serialno[9],
131 serialno[2], serialno[3],
132 serialno[4], serialno[5], serialno[6], serialno[7]);
133 break;
134
135 case I2O_SNFORMAT_DDM: /* I2O DDM */
136 seq_printf(seq,
137 "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh",
138 *(u16 *) & serialno[2],
139 *(u16 *) & serialno[4], *(u16 *) & serialno[6]);
140 break;
141
142 case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */
143 case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */
144 /* FIXME: Figure if this is even close?? */
145 seq_printf(seq,
146 "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n",
147 *(u32 *) & serialno[2],
148 *(u32 *) & serialno[6],
149 *(u32 *) & serialno[10], *(u32 *) & serialno[14]);
150 break;
151
152 case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */
153 case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */
154 default:
155 seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]);
156 break;
157 }
158
159 return 0;
160}
161
162/**
163 * i2o_get_class_name - do i2o class name lookup
164 * @class: class number
165 *
166 * Return a descriptive string for an i2o class
167 */
168static const char *i2o_get_class_name(int class)
169{
170 int idx = 16;
171 static char *i2o_class_name[] = {
172 "Executive",
173 "Device Driver Module",
174 "Block Device",
175 "Tape Device",
176 "LAN Interface",
177 "WAN Interface",
178 "Fibre Channel Port",
179 "Fibre Channel Device",
180 "SCSI Device",
181 "ATE Port",
182 "ATE Device",
183 "Floppy Controller",
184 "Floppy Device",
185 "Secondary Bus Port",
186 "Peer Transport Agent",
187 "Peer Transport",
188 "Unknown"
189 };
190
191 switch (class & 0xfff) {
192 case I2O_CLASS_EXECUTIVE:
193 idx = 0;
194 break;
195 case I2O_CLASS_DDM:
196 idx = 1;
197 break;
198 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
199 idx = 2;
200 break;
201 case I2O_CLASS_SEQUENTIAL_STORAGE:
202 idx = 3;
203 break;
204 case I2O_CLASS_LAN:
205 idx = 4;
206 break;
207 case I2O_CLASS_WAN:
208 idx = 5;
209 break;
210 case I2O_CLASS_FIBRE_CHANNEL_PORT:
211 idx = 6;
212 break;
213 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
214 idx = 7;
215 break;
216 case I2O_CLASS_SCSI_PERIPHERAL:
217 idx = 8;
218 break;
219 case I2O_CLASS_ATE_PORT:
220 idx = 9;
221 break;
222 case I2O_CLASS_ATE_PERIPHERAL:
223 idx = 10;
224 break;
225 case I2O_CLASS_FLOPPY_CONTROLLER:
226 idx = 11;
227 break;
228 case I2O_CLASS_FLOPPY_DEVICE:
229 idx = 12;
230 break;
231 case I2O_CLASS_BUS_ADAPTER_PORT:
232 idx = 13;
233 break;
234 case I2O_CLASS_PEER_TRANSPORT_AGENT:
235 idx = 14;
236 break;
237 case I2O_CLASS_PEER_TRANSPORT:
238 idx = 15;
239 break;
240 }
241
242 return i2o_class_name[idx];
243}
244
245#define SCSI_TABLE_SIZE 13
246static char *scsi_devices[] = {
247 "Direct-Access Read/Write",
248 "Sequential-Access Storage",
249 "Printer",
250 "Processor",
251 "WORM Device",
252 "CD-ROM Device",
253 "Scanner Device",
254 "Optical Memory Device",
255 "Medium Changer Device",
256 "Communications Device",
257 "Graphics Art Pre-Press Device",
258 "Graphics Art Pre-Press Device",
259 "Array Controller Device"
260};
261
262static char *chtostr(u8 * chars, int n)
263{
264	static char tmp[256];	/* static: a stack buffer would dangle once we return */
265	tmp[0] = 0;
266	return strncat(tmp, (char *)chars, n);
267}
268
269static int i2o_report_query_status(struct seq_file *seq, int block_status,
270 char *group)
271{
272 switch (block_status) {
273 case -ETIMEDOUT:
274 return seq_printf(seq, "Timeout reading group %s.\n", group);
275 case -ENOMEM:
276 return seq_printf(seq, "No free memory to read the table.\n");
277 case -I2O_PARAMS_STATUS_INVALID_GROUP_ID:
278 return seq_printf(seq, "Group %s not supported.\n", group);
279 default:
280 return seq_printf(seq,
281 "Error reading group %s. BlockStatus 0x%02X\n",
282 group, -block_status);
283 }
284}
285
286static char *bus_strings[] = {
287 "Local Bus",
288 "ISA",
289 "EISA",
290 "MCA",
291 "PCI",
292 "PCMCIA",
293 "NUBUS",
294 "CARDBUS"
295};
296
297static int i2o_seq_show_hrt(struct seq_file *seq, void *v)
298{
299 struct i2o_controller *c = (struct i2o_controller *)seq->private;
300 i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt;
301 u32 bus;
302 int i;
303
304 if (hrt->hrt_version) {
305 seq_printf(seq,
306 "HRT table for controller is too new a version.\n");
307 return 0;
308 }
309
310 seq_printf(seq, "HRT has %d entries of %d bytes each.\n",
311 hrt->num_entries, hrt->entry_len << 2);
312
313 for (i = 0; i < hrt->num_entries; i++) {
314 seq_printf(seq, "Entry %d:\n", i);
315 seq_printf(seq, " Adapter ID: %0#10x\n",
316 hrt->hrt_entry[i].adapter_id);
317 seq_printf(seq, " Controlling tid: %0#6x\n",
318 hrt->hrt_entry[i].parent_tid);
319
320 if (hrt->hrt_entry[i].bus_type != 0x80) {
321 bus = hrt->hrt_entry[i].bus_type;
322 seq_printf(seq, " %s Information\n",
323 bus_strings[bus]);
324
325 switch (bus) {
326 case I2O_BUS_LOCAL:
327 seq_printf(seq, " IOBase: %0#6x,",
328 hrt->hrt_entry[i].bus.local_bus.
329 LbBaseIOPort);
330 seq_printf(seq, " MemoryBase: %0#10x\n",
331 hrt->hrt_entry[i].bus.local_bus.
332 LbBaseMemoryAddress);
333 break;
334
335 case I2O_BUS_ISA:
336 seq_printf(seq, " IOBase: %0#6x,",
337 hrt->hrt_entry[i].bus.isa_bus.
338 IsaBaseIOPort);
339 seq_printf(seq, " MemoryBase: %0#10x,",
340 hrt->hrt_entry[i].bus.isa_bus.
341 IsaBaseMemoryAddress);
342 seq_printf(seq, " CSN: %0#4x,",
343 hrt->hrt_entry[i].bus.isa_bus.CSN);
344 break;
345
346 case I2O_BUS_EISA:
347 seq_printf(seq, " IOBase: %0#6x,",
348 hrt->hrt_entry[i].bus.eisa_bus.
349 EisaBaseIOPort);
350 seq_printf(seq, " MemoryBase: %0#10x,",
351 hrt->hrt_entry[i].bus.eisa_bus.
352 EisaBaseMemoryAddress);
353 seq_printf(seq, " Slot: %0#4x,",
354 hrt->hrt_entry[i].bus.eisa_bus.
355 EisaSlotNumber);
356 break;
357
358 case I2O_BUS_MCA:
359 seq_printf(seq, " IOBase: %0#6x,",
360 hrt->hrt_entry[i].bus.mca_bus.
361 McaBaseIOPort);
362 seq_printf(seq, " MemoryBase: %0#10x,",
363 hrt->hrt_entry[i].bus.mca_bus.
364 McaBaseMemoryAddress);
365 seq_printf(seq, " Slot: %0#4x,",
366 hrt->hrt_entry[i].bus.mca_bus.
367 McaSlotNumber);
368 break;
369
370 case I2O_BUS_PCI:
371 seq_printf(seq, " Bus: %0#4x",
372 hrt->hrt_entry[i].bus.pci_bus.
373 PciBusNumber);
374 seq_printf(seq, " Dev: %0#4x",
375 hrt->hrt_entry[i].bus.pci_bus.
376 PciDeviceNumber);
377 seq_printf(seq, " Func: %0#4x",
378 hrt->hrt_entry[i].bus.pci_bus.
379 PciFunctionNumber);
380 seq_printf(seq, " Vendor: %0#6x",
381 hrt->hrt_entry[i].bus.pci_bus.
382 PciVendorID);
383 seq_printf(seq, " Device: %0#6x\n",
384 hrt->hrt_entry[i].bus.pci_bus.
385 PciDeviceID);
386 break;
387
388 default:
389 seq_printf(seq, " Unsupported Bus Type\n");
390 }
391 } else
392 seq_printf(seq, " Unknown Bus Type\n");
393 }
394
395 return 0;
396}
397
398static int i2o_seq_show_lct(struct seq_file *seq, void *v)
399{
400 struct i2o_controller *c = (struct i2o_controller *)seq->private;
401 i2o_lct *lct = (i2o_lct *) c->lct;
402 int entries;
403 int i;
404
405#define BUS_TABLE_SIZE 3
406 static char *bus_ports[] = {
407 "Generic Bus",
408 "SCSI Bus",
409 "Fibre Channel Bus"
410 };
411
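	/*
	 * The LCT header occupies 3 dwords and each entry 9 dwords, so the
	 * entry count falls straight out of table_size (assuming the fixed
	 * entry size this driver uses).
	 */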
412 entries = (lct->table_size - 3) / 9;
413
414 seq_printf(seq, "LCT contains %d %s\n", entries,
415 entries == 1 ? "entry" : "entries");
416 if (lct->boot_tid)
417 seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid);
418
419 seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind);
420
421 for (i = 0; i < entries; i++) {
422 seq_printf(seq, "Entry %d\n", i);
423 seq_printf(seq, " Class, SubClass : %s",
424 i2o_get_class_name(lct->lct_entry[i].class_id));
425
426 /*
427 * Classes which we'll print subclass info for
428 */
429 switch (lct->lct_entry[i].class_id & 0xFFF) {
430 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
431 switch (lct->lct_entry[i].sub_class) {
432 case 0x00:
433 seq_printf(seq, ", Direct-Access Read/Write");
434 break;
435
436 case 0x04:
437 seq_printf(seq, ", WORM Drive");
438 break;
439
440 case 0x05:
441 seq_printf(seq, ", CD-ROM Drive");
442 break;
443
444 case 0x07:
445 seq_printf(seq, ", Optical Memory Device");
446 break;
447
448 default:
449 seq_printf(seq, ", Unknown (0x%02x)",
450 lct->lct_entry[i].sub_class);
451 break;
452 }
453 break;
454
455 case I2O_CLASS_LAN:
456 switch (lct->lct_entry[i].sub_class & 0xFF) {
457 case 0x30:
458 seq_printf(seq, ", Ethernet");
459 break;
460
461 case 0x40:
462 seq_printf(seq, ", 100base VG");
463 break;
464
465 case 0x50:
466 seq_printf(seq, ", IEEE 802.5/Token-Ring");
467 break;
468
469 case 0x60:
470 seq_printf(seq, ", ANSI X3T9.5 FDDI");
471 break;
472
473 case 0x70:
474 seq_printf(seq, ", Fibre Channel");
475 break;
476
477 default:
478 seq_printf(seq, ", Unknown Sub-Class (0x%02x)",
479 lct->lct_entry[i].sub_class & 0xFF);
480 break;
481 }
482 break;
483
484 case I2O_CLASS_SCSI_PERIPHERAL:
485 if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE)
486 seq_printf(seq, ", %s",
487 scsi_devices[lct->lct_entry[i].
488 sub_class]);
489 else
490 seq_printf(seq, ", Unknown Device Type");
491 break;
492
493 case I2O_CLASS_BUS_ADAPTER_PORT:
494 if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
495 seq_printf(seq, ", %s",
496 bus_ports[lct->lct_entry[i].
497 sub_class]);
498 else
499 seq_printf(seq, ", Unknown Bus Type");
500 break;
501 }
502 seq_printf(seq, "\n");
503
504 seq_printf(seq, " Local TID : 0x%03x\n",
505 lct->lct_entry[i].tid);
506 seq_printf(seq, " User TID : 0x%03x\n",
507 lct->lct_entry[i].user_tid);
508 seq_printf(seq, " Parent TID : 0x%03x\n",
509 lct->lct_entry[i].parent_tid);
510 seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n",
511 lct->lct_entry[i].identity_tag[0],
512 lct->lct_entry[i].identity_tag[1],
513 lct->lct_entry[i].identity_tag[2],
514 lct->lct_entry[i].identity_tag[3],
515 lct->lct_entry[i].identity_tag[4],
516 lct->lct_entry[i].identity_tag[5],
517 lct->lct_entry[i].identity_tag[6],
518 lct->lct_entry[i].identity_tag[7]);
519 seq_printf(seq, " Change Indicator : %0#10x\n",
520 lct->lct_entry[i].change_ind);
521 seq_printf(seq, " Event Capab Mask : %0#10x\n",
522 lct->lct_entry[i].device_flags);
523 }
524
525 return 0;
526}
527
528static int i2o_seq_show_status(struct seq_file *seq, void *v)
529{
530 struct i2o_controller *c = (struct i2o_controller *)seq->private;
531 char prodstr[25];
532 int version;
533 i2o_status_block *sb = c->status_block.virt;
534
535 i2o_status_get(c); // reread the status block
536
537 seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id);
538
539 version = sb->i2o_version;
540
541/* FIXME for Spec 2.0
542 if (version == 0x02) {
543 seq_printf(seq, "Lowest I2O version supported: ");
544 switch(workspace[2]) {
545 case 0x00:
546 seq_printf(seq, "1.0\n");
547 break;
548 case 0x01:
549 seq_printf(seq, "1.5\n");
550 break;
551 case 0x02:
552 seq_printf(seq, "2.0\n");
553 break;
554 }
555
556 seq_printf(seq, "Highest I2O version supported: ");
557 switch(workspace[3]) {
558 case 0x00:
559 seq_printf(seq, "1.0\n");
560 break;
561 case 0x01:
562 seq_printf(seq, "1.5\n");
563 break;
564 case 0x02:
565 seq_printf(seq, "2.0\n");
566 break;
567 }
568 }
569*/
570 seq_printf(seq, "IOP ID : %0#5x\n", sb->iop_id);
571 seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id);
572 seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number);
573
574 seq_printf(seq, "I2O version : ");
575 switch (version) {
576 case 0x00:
577 seq_printf(seq, "1.0\n");
578 break;
579 case 0x01:
580 seq_printf(seq, "1.5\n");
581 break;
582 case 0x02:
583 seq_printf(seq, "2.0\n");
584 break;
585 default:
586 seq_printf(seq, "Unknown version\n");
587 }
588
589 seq_printf(seq, "IOP State : ");
590 switch (sb->iop_state) {
591 case 0x01:
592 seq_printf(seq, "INIT\n");
593 break;
594
595 case 0x02:
596 seq_printf(seq, "RESET\n");
597 break;
598
599 case 0x04:
600 seq_printf(seq, "HOLD\n");
601 break;
602
603 case 0x05:
604 seq_printf(seq, "READY\n");
605 break;
606
607 case 0x08:
608 seq_printf(seq, "OPERATIONAL\n");
609 break;
610
611 case 0x10:
612 seq_printf(seq, "FAILED\n");
613 break;
614
615 case 0x11:
616 seq_printf(seq, "FAULTED\n");
617 break;
618
619 default:
620 seq_printf(seq, "Unknown\n");
621 break;
622 }
623
624 seq_printf(seq, "Messenger Type : ");
625 switch (sb->msg_type) {
626 case 0x00:
627 seq_printf(seq, "Memory mapped\n");
628 break;
629 case 0x01:
630 seq_printf(seq, "Memory mapped only\n");
631 break;
632 case 0x02:
633 seq_printf(seq, "Remote only\n");
634 break;
635 case 0x03:
636 seq_printf(seq, "Memory mapped and remote\n");
637 break;
638 default:
639 seq_printf(seq, "Unknown\n");
640 }
641
642 seq_printf(seq, "Inbound Frame Size : %d bytes\n",
643 sb->inbound_frame_size << 2);
644 seq_printf(seq, "Max Inbound Frames : %d\n",
645 sb->max_inbound_frames);
646 seq_printf(seq, "Current Inbound Frames : %d\n",
647 sb->cur_inbound_frames);
648 seq_printf(seq, "Max Outbound Frames : %d\n",
649 sb->max_outbound_frames);
650
651 /* Spec doesn't say if NULL terminated or not... */
652 memcpy(prodstr, sb->product_id, 24);
653 prodstr[24] = '\0';
654 seq_printf(seq, "Product ID : %s\n", prodstr);
655 seq_printf(seq, "Expected LCT Size : %d bytes\n",
656 sb->expected_lct_size);
657
658 seq_printf(seq, "IOP Capabilities\n");
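	/*
	 * iop_capabilities bits decoded below: bits 1:0 = context field sizes
	 * the IOP can support, bits 3:2 = the size currently configured,
	 * bits 4..6 = inbound peer / outbound peer / peer-to-peer support.
	 */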
659 seq_printf(seq, " Context Field Size Support : ");
660 switch (sb->iop_capabilities & 0x0000003) {
661 case 0:
662 seq_printf(seq, "Supports only 32-bit context fields\n");
663 break;
664 case 1:
665 seq_printf(seq, "Supports only 64-bit context fields\n");
666 break;
667 case 2:
668 seq_printf(seq, "Supports 32-bit and 64-bit context fields, "
669 "but not concurrently\n");
670 break;
671 case 3:
672 seq_printf(seq, "Supports 32-bit and 64-bit context fields "
673 "concurrently\n");
674 break;
675 default:
676 seq_printf(seq, "0x%08x\n", sb->iop_capabilities);
677 }
678 seq_printf(seq, " Current Context Field Size : ");
679 switch (sb->iop_capabilities & 0x0000000C) {
680 case 0:
681 seq_printf(seq, "not configured\n");
682 break;
683 case 4:
684 seq_printf(seq, "Supports only 32-bit context fields\n");
685 break;
686 case 8:
687 seq_printf(seq, "Supports only 64-bit context fields\n");
688 break;
689 case 12:
690		seq_printf(seq, "Supports both 32-bit and 64-bit context fields "
691 "concurrently\n");
692 break;
693 default:
694 seq_printf(seq, "\n");
695 }
696 seq_printf(seq, " Inbound Peer Support : %s\n",
697 (sb->
698 iop_capabilities & 0x00000010) ? "Supported" :
699 "Not supported");
700 seq_printf(seq, " Outbound Peer Support : %s\n",
701 (sb->
702 iop_capabilities & 0x00000020) ? "Supported" :
703 "Not supported");
704 seq_printf(seq, " Peer to Peer Support : %s\n",
705 (sb->
706 iop_capabilities & 0x00000040) ? "Supported" :
707 "Not supported");
708
709 seq_printf(seq, "Desired private memory size : %d kB\n",
710 sb->desired_mem_size >> 10);
711 seq_printf(seq, "Allocated private memory size : %d kB\n",
712 sb->current_mem_size >> 10);
713 seq_printf(seq, "Private memory base address : %0#10x\n",
714 sb->current_mem_base);
715 seq_printf(seq, "Desired private I/O size : %d kB\n",
716 sb->desired_io_size >> 10);
717 seq_printf(seq, "Allocated private I/O size : %d kB\n",
718 sb->current_io_size >> 10);
719 seq_printf(seq, "Private I/O base address : %0#10x\n",
720 sb->current_io_base);
721
722 return 0;
723}
724
725static int i2o_seq_show_hw(struct seq_file *seq, void *v)
726{
727 struct i2o_controller *c = (struct i2o_controller *)seq->private;
728 static u32 work32[5];
729 static u8 *work8 = (u8 *) work32;
730 static u16 *work16 = (u16 *) work32;
731 int token;
732 u32 hwcap;
733
734 static char *cpu_table[] = {
735 "Intel 80960 series",
736 "AMD2900 series",
737 "Motorola 68000 series",
738 "ARM series",
739 "MIPS series",
740 "Sparc series",
741 "PowerPC series",
742 "Intel x86 series"
743 };
744
745 token =
746 i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32));
747
748 if (token < 0) {
749 i2o_report_query_status(seq, token, "0x0000 IOP Hardware");
750 return 0;
751 }
752
753 seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]);
754 seq_printf(seq, "Product ID : %0#6x\n", work16[1]);
755 seq_printf(seq, "CPU : ");
756	if (work8[16] >= 8)	/* cpu_table has 8 entries */
757 seq_printf(seq, "Unknown\n");
758 else
759 seq_printf(seq, "%s\n", cpu_table[work8[16]]);
760 /* Anyone using ProcessorVersion? */
761
762 seq_printf(seq, "RAM : %dkB\n", work32[1] >> 10);
763 seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10);
764
765 hwcap = work32[3];
766 seq_printf(seq, "Capabilities : 0x%08x\n", hwcap);
767 seq_printf(seq, " [%s] Self booting\n",
768 (hwcap & 0x00000001) ? "+" : "-");
769 seq_printf(seq, " [%s] Upgradable IRTOS\n",
770 (hwcap & 0x00000002) ? "+" : "-");
771 seq_printf(seq, " [%s] Supports downloading DDMs\n",
772 (hwcap & 0x00000004) ? "+" : "-");
773 seq_printf(seq, " [%s] Supports installing DDMs\n",
774 (hwcap & 0x00000008) ? "+" : "-");
775 seq_printf(seq, " [%s] Battery-backed RAM\n",
776 (hwcap & 0x00000010) ? "+" : "-");
777
778 return 0;
779}
780
781/* Executive group 0003h - Executing DDM List (table) */
782static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
783{
784 struct i2o_controller *c = (struct i2o_controller *)seq->private;
785 int token;
786 int i;
787
788 typedef struct _i2o_exec_execute_ddm_table {
789 u16 ddm_tid;
790 u8 module_type;
791 u8 reserved;
792 u16 i2o_vendor_id;
793 u16 module_id;
794 u8 module_name_version[28];
795 u32 data_size;
796 u32 code_size;
797 } i2o_exec_execute_ddm_table;
798
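	/*
	 * Params-table replies share this header (result_count through
	 * more_flag) in front of the rows; the same shape recurs in the other
	 * table queries further down in this file.
	 */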
799 struct {
800 u16 result_count;
801 u16 pad;
802 u16 block_size;
803 u8 block_status;
804 u8 error_info_size;
805 u16 row_count;
806 u16 more_flag;
807 i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES];
808 } *result;
809
810 i2o_exec_execute_ddm_table ddm_table;
811
812 result = kmalloc(sizeof(*result), GFP_KERNEL);
813 if (!result)
814 return -ENOMEM;
815
816 token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1,
817 NULL, 0, result, sizeof(*result));
818
819 if (token < 0) {
820 i2o_report_query_status(seq, token,
821 "0x0003 Executing DDM List");
822 goto out;
823 }
824
825 seq_printf(seq,
826 "Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n");
827 ddm_table = result->ddm_table[0];
828
829 for (i = 0; i < result->row_count; ddm_table = result->ddm_table[++i]) {
830 seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF);
831
832 switch (ddm_table.module_type) {
833 case 0x01:
834 seq_printf(seq, "Downloaded DDM ");
835 break;
836 case 0x22:
837 seq_printf(seq, "Embedded DDM ");
838 break;
839 default:
840 seq_printf(seq, " ");
841 }
842
843 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
844 seq_printf(seq, "%-#8x", ddm_table.module_id);
845 seq_printf(seq, "%-29s",
846 chtostr(ddm_table.module_name_version, 28));
847 seq_printf(seq, "%9d ", ddm_table.data_size);
848 seq_printf(seq, "%8d", ddm_table.code_size);
849
850 seq_printf(seq, "\n");
851 }
852 out:
853 kfree(result);
854 return 0;
855}
856
857/* Executive group 0004h - Driver Store (scalar) */
858static int i2o_seq_show_driver_store(struct seq_file *seq, void *v)
859{
860 struct i2o_controller *c = (struct i2o_controller *)seq->private;
861 u32 work32[8];
862 int token;
863
864 token =
865 i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32));
866 if (token < 0) {
867 i2o_report_query_status(seq, token, "0x0004 Driver Store");
868 return 0;
869 }
870
871 seq_printf(seq, "Module limit : %d\n"
872 "Module count : %d\n"
873 "Current space : %d kB\n"
874 "Free space : %d kB\n",
875 work32[0], work32[1], work32[2] >> 10, work32[3] >> 10);
876
877 return 0;
878}
879
880/* Executive group 0005h - Driver Store Table (table) */
881static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
882{
883 typedef struct _i2o_driver_store {
884 u16 stored_ddm_index;
885 u8 module_type;
886 u8 reserved;
887 u16 i2o_vendor_id;
888 u16 module_id;
889 u8 module_name_version[28];
890 u8 date[8];
891 u32 module_size;
892 u32 mpb_size;
893 u32 module_flags;
894 } i2o_driver_store_table;
895
896 struct i2o_controller *c = (struct i2o_controller *)seq->private;
897 int token;
898 int i;
899
900 typedef struct {
901 u16 result_count;
902 u16 pad;
903 u16 block_size;
904 u8 block_status;
905 u8 error_info_size;
906 u16 row_count;
907 u16 more_flag;
908 i2o_driver_store_table dst[I2O_MAX_MODULES];
909 } i2o_driver_result_table;
910
911 i2o_driver_result_table *result;
912 i2o_driver_store_table *dst;
913
914 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
915 if (result == NULL)
916 return -ENOMEM;
917
918 token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1,
919 NULL, 0, result, sizeof(*result));
920
921 if (token < 0) {
922 i2o_report_query_status(seq, token,
923 "0x0005 DRIVER STORE TABLE");
924 kfree(result);
925 return 0;
926 }
927
928 seq_printf(seq,
929 "# Module_type Vendor Mod_id Module_name Vrs"
930		   " Date Mod_size Par_size Flags\n");
931 for (i = 0, dst = &result->dst[0]; i < result->row_count;
932 dst = &result->dst[++i]) {
933 seq_printf(seq, "%-3d", dst->stored_ddm_index);
934 switch (dst->module_type) {
935 case 0x01:
936 seq_printf(seq, "Downloaded DDM ");
937 break;
938 case 0x22:
939 seq_printf(seq, "Embedded DDM ");
940 break;
941 default:
942 seq_printf(seq, " ");
943 }
944
945 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
946 seq_printf(seq, "%-#8x", dst->module_id);
947 seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
948 seq_printf(seq, "%-9s", chtostr(dst->date, 8));
949 seq_printf(seq, "%8d ", dst->module_size);
950 seq_printf(seq, "%8d ", dst->mpb_size);
951 seq_printf(seq, "0x%04x", dst->module_flags);
952 seq_printf(seq, "\n");
953 }
954
955 kfree(result);
956 return 0;
957}
958
959/* Generic group F000h - Params Descriptor (table) */
960static int i2o_seq_show_groups(struct seq_file *seq, void *v)
961{
962 struct i2o_device *d = (struct i2o_device *)seq->private;
963 int token;
964 int i;
965 u8 properties;
966
967 typedef struct _i2o_group_info {
968 u16 group_number;
969 u16 field_count;
970 u16 row_count;
971 u8 properties;
972 u8 reserved;
973 } i2o_group_info;
974
975 struct {
976 u16 result_count;
977 u16 pad;
978 u16 block_size;
979 u8 block_status;
980 u8 error_info_size;
981 u16 row_count;
982 u16 more_flag;
983 i2o_group_info group[256];
984 } *result;
985
986 result = kmalloc(sizeof(*result), GFP_KERNEL);
987 if (!result)
988 return -ENOMEM;
989
990 token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0,
991 result, sizeof(*result));
992
993 if (token < 0) {
994 i2o_report_query_status(seq, token, "0xF000 Params Descriptor");
995 goto out;
996 }
997
998 seq_printf(seq,
999 "# Group FieldCount RowCount Type Add Del Clear\n");
1000
1001 for (i = 0; i < result->row_count; i++) {
1002 seq_printf(seq, "%-3d", i);
1003 seq_printf(seq, "0x%04X ", result->group[i].group_number);
1004 seq_printf(seq, "%10d ", result->group[i].field_count);
1005 seq_printf(seq, "%8d ", result->group[i].row_count);
1006
1007 properties = result->group[i].properties;
1008 if (properties & 0x1)
1009 seq_printf(seq, "Table ");
1010 else
1011 seq_printf(seq, "Scalar ");
1012 if (properties & 0x2)
1013 seq_printf(seq, " + ");
1014 else
1015 seq_printf(seq, " - ");
1016 if (properties & 0x4)
1017 seq_printf(seq, " + ");
1018 else
1019 seq_printf(seq, " - ");
1020 if (properties & 0x8)
1021 seq_printf(seq, " + ");
1022 else
1023 seq_printf(seq, " - ");
1024
1025 seq_printf(seq, "\n");
1026 }
1027
1028 if (result->more_flag)
1029 seq_printf(seq, "There is more...\n");
1030 out:
1031 kfree(result);
1032 return 0;
1033}
1034
1035/* Generic group F001h - Physical Device Table (table) */
1036static int i2o_seq_show_phys_device(struct seq_file *seq, void *v)
1037{
1038 struct i2o_device *d = (struct i2o_device *)seq->private;
1039 int token;
1040 int i;
1041
1042 struct {
1043 u16 result_count;
1044 u16 pad;
1045 u16 block_size;
1046 u8 block_status;
1047 u8 error_info_size;
1048 u16 row_count;
1049 u16 more_flag;
1050 u32 adapter_id[64];
1051 } result;
1052
1053 token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0,
1054 &result, sizeof(result));
1055
1056 if (token < 0) {
1057 i2o_report_query_status(seq, token,
1058 "0xF001 Physical Device Table");
1059 return 0;
1060 }
1061
1062 if (result.row_count)
1063 seq_printf(seq, "# AdapterId\n");
1064
1065 for (i = 0; i < result.row_count; i++) {
1066 seq_printf(seq, "%-2d", i);
1067 seq_printf(seq, "%#7x\n", result.adapter_id[i]);
1068 }
1069
1070 if (result.more_flag)
1071 seq_printf(seq, "There is more...\n");
1072
1073 return 0;
1074}
1075
1076/* Generic group F002h - Claimed Table (table) */
1077static int i2o_seq_show_claimed(struct seq_file *seq, void *v)
1078{
1079 struct i2o_device *d = (struct i2o_device *)seq->private;
1080 int token;
1081 int i;
1082
1083 struct {
1084 u16 result_count;
1085 u16 pad;
1086 u16 block_size;
1087 u8 block_status;
1088 u8 error_info_size;
1089 u16 row_count;
1090 u16 more_flag;
1091 u16 claimed_tid[64];
1092 } result;
1093
1094 token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0,
1095 &result, sizeof(result));
1096
1097 if (token < 0) {
1098 i2o_report_query_status(seq, token, "0xF002 Claimed Table");
1099 return 0;
1100 }
1101
1102 if (result.row_count)
1103 seq_printf(seq, "# ClaimedTid\n");
1104
1105 for (i = 0; i < result.row_count; i++) {
1106 seq_printf(seq, "%-2d", i);
1107 seq_printf(seq, "%#7x\n", result.claimed_tid[i]);
1108 }
1109
1110 if (result.more_flag)
1111 seq_printf(seq, "There is more...\n");
1112
1113 return 0;
1114}
1115
1116/* Generic group F003h - User Table (table) */
1117static int i2o_seq_show_users(struct seq_file *seq, void *v)
1118{
1119 struct i2o_device *d = (struct i2o_device *)seq->private;
1120 int token;
1121 int i;
1122
1123 typedef struct _i2o_user_table {
1124 u16 instance;
1125 u16 user_tid;
1126 u8 claim_type;
1127 u8 reserved1;
1128 u16 reserved2;
1129 } i2o_user_table;
1130
1131 struct {
1132 u16 result_count;
1133 u16 pad;
1134 u16 block_size;
1135 u8 block_status;
1136 u8 error_info_size;
1137 u16 row_count;
1138 u16 more_flag;
1139 i2o_user_table user[64];
1140 } *result;
1141
1142 result = kmalloc(sizeof(*result), GFP_KERNEL);
1143 if (!result)
1144 return -ENOMEM;
1145
1146 token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0,
1147 result, sizeof(*result));
1148
1149 if (token < 0) {
1150 i2o_report_query_status(seq, token, "0xF003 User Table");
1151 goto out;
1152 }
1153
1154 seq_printf(seq, "# Instance UserTid ClaimType\n");
1155
1156 for (i = 0; i < result->row_count; i++) {
1157 seq_printf(seq, "%-3d", i);
1158 seq_printf(seq, "%#8x ", result->user[i].instance);
1159 seq_printf(seq, "%#7x ", result->user[i].user_tid);
1160 seq_printf(seq, "%#9x\n", result->user[i].claim_type);
1161 }
1162
1163 if (result->more_flag)
1164 seq_printf(seq, "There is more...\n");
1165 out:
1166 kfree(result);
1167 return 0;
1168}
1169
1170/* Generic group F005h - Private message extensions (table) (optional) */
1171static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v)
1172{
1173 struct i2o_device *d = (struct i2o_device *)seq->private;
1174 int token;
1175 int i;
1176
1177 typedef struct _i2o_private {
1178 u16 ext_instance;
1179 u16 organization_id;
1180 u16 x_function_code;
1181 } i2o_private;
1182
1183 struct {
1184 u16 result_count;
1185 u16 pad;
1186 u16 block_size;
1187 u8 block_status;
1188 u8 error_info_size;
1189 u16 row_count;
1190 u16 more_flag;
1191 i2o_private extension[64];
1192 } result;
1193
1194	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF005, -1, NULL, 0,
1195 &result, sizeof(result));
1196
1197 if (token < 0) {
1198 i2o_report_query_status(seq, token,
1199 "0xF005 Private Message Extensions (optional)");
1200 return 0;
1201 }
1202
1203 seq_printf(seq, "Instance# OrgId FunctionCode\n");
1204
1205 for (i = 0; i < result.row_count; i++) {
1206 seq_printf(seq, "%0#9x ", result.extension[i].ext_instance);
1207 seq_printf(seq, "%0#6x ", result.extension[i].organization_id);
1208 seq_printf(seq, "%0#6x", result.extension[i].x_function_code);
1209
1210 seq_printf(seq, "\n");
1211 }
1212
1213 if (result.more_flag)
1214 seq_printf(seq, "There is more...\n");
1215
1216 return 0;
1217}
1218
1219/* Generic group F006h - Authorized User Table (table) */
1220static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
1221{
1222 struct i2o_device *d = (struct i2o_device *)seq->private;
1223 int token;
1224 int i;
1225
1226 struct {
1227 u16 result_count;
1228 u16 pad;
1229 u16 block_size;
1230 u8 block_status;
1231 u8 error_info_size;
1232 u16 row_count;
1233 u16 more_flag;
1234 u32 alternate_tid[64];
1235 } result;
1236
1237 token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0,
1238 &result, sizeof(result));
1239
1240 if (token < 0) {
1241 i2o_report_query_status(seq, token,
1242				"0xF006 Authorized User Table");
1243 return 0;
1244 }
1245
1246 if (result.row_count)
1247 seq_printf(seq, "# AlternateTid\n");
1248
1249 for (i = 0; i < result.row_count; i++) {
1250 seq_printf(seq, "%-2d", i);
1251		seq_printf(seq, "%#7x\n", result.alternate_tid[i]);
1252 }
1253
1254 if (result.more_flag)
1255 seq_printf(seq, "There is more...\n");
1256
1257 return 0;
1258}
1259
1260/* Generic group F100h - Device Identity (scalar) */
1261static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
1262{
1263 struct i2o_device *d = (struct i2o_device *)seq->private;
1264 static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
1265 // == (allow) 512d bytes (max)
1266 static u16 *work16 = (u16 *) work32;
1267 int token;
1268
1269 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
1270
1271 if (token < 0) {
1272 i2o_report_query_status(seq, token, "0xF100 Device Identity");
1273 return 0;
1274 }
1275
1276 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
1277 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
1278 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
1279 seq_printf(seq, "Vendor info : %s\n",
1280 chtostr((u8 *) (work32 + 2), 16));
1281 seq_printf(seq, "Product info : %s\n",
1282 chtostr((u8 *) (work32 + 6), 16));
1283 seq_printf(seq, "Description : %s\n",
1284 chtostr((u8 *) (work32 + 10), 16));
1285 seq_printf(seq, "Product rev. : %s\n",
1286 chtostr((u8 *) (work32 + 14), 8));
1287
1288 seq_printf(seq, "Serial number : ");
1289 print_serial_number(seq, (u8 *) (work32 + 16),
1290 /* allow for SNLen plus
1291 * possible trailing '\0'
1292 */
1293 sizeof(work32) - (16 * sizeof(u32)) - 2);
1294 seq_printf(seq, "\n");
1295
1296 return 0;
1297}
1298
1299static int i2o_seq_show_dev_name(struct seq_file *seq, void *v)
1300{
1301 struct i2o_device *d = (struct i2o_device *)seq->private;
1302
1303 seq_printf(seq, "%s\n", d->device.bus_id);
1304
1305 return 0;
1306}
1307
1308/* Generic group F101h - DDM Identity (scalar) */
1309static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
1310{
1311 struct i2o_device *d = (struct i2o_device *)seq->private;
1312 int token;
1313
1314 struct {
1315 u16 ddm_tid;
1316 u8 module_name[24];
1317 u8 module_rev[8];
1318 u8 sn_format;
1319 u8 serial_number[12];
1320 u8 pad[256]; // allow up to 256 byte (max) serial number
1321 } result;
1322
1323 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
1324
1325 if (token < 0) {
1326 i2o_report_query_status(seq, token, "0xF101 DDM Identity");
1327 return 0;
1328 }
1329
1330 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
1331 seq_printf(seq, "Module name : %s\n",
1332 chtostr(result.module_name, 24));
1333 seq_printf(seq, "Module revision : %s\n",
1334 chtostr(result.module_rev, 8));
1335
1336 seq_printf(seq, "Serial number : ");
1337 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
1338 /* allow for SNLen plus possible trailing '\0' */
1339
1340 seq_printf(seq, "\n");
1341
1342 return 0;
1343}
1344
1345/* Generic group F102h - User Information (scalar) */
1346static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
1347{
1348 struct i2o_device *d = (struct i2o_device *)seq->private;
1349 int token;
1350
1351 struct {
1352 u8 device_name[64];
1353 u8 service_name[64];
1354 u8 physical_location[64];
1355 u8 instance_number[4];
1356 } result;
1357
1358 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
1359
1360 if (token < 0) {
1361 i2o_report_query_status(seq, token, "0xF102 User Information");
1362 return 0;
1363 }
1364
1365 seq_printf(seq, "Device name : %s\n",
1366 chtostr(result.device_name, 64));
1367 seq_printf(seq, "Service name : %s\n",
1368 chtostr(result.service_name, 64));
1369 seq_printf(seq, "Physical name : %s\n",
1370 chtostr(result.physical_location, 64));
1371 seq_printf(seq, "Instance number : %s\n",
1372 chtostr(result.instance_number, 4));
1373
1374 return 0;
1375}
1376
1377/* Generic group F103h - SGL Operating Limits (scalar) */
1378static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
1379{
1380 struct i2o_device *d = (struct i2o_device *)seq->private;
1381 static u32 work32[12];
1382 static u16 *work16 = (u16 *) work32;
1383 static u8 *work8 = (u8 *) work32;
1384 int token;
1385
1386 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
1387
1388 if (token < 0) {
1389 i2o_report_query_status(seq, token,
1390 "0xF103 SGL Operating Limits");
1391 return 0;
1392 }
1393
1394 seq_printf(seq, "SGL chain size : %d\n", work32[0]);
1395 seq_printf(seq, "Max SGL chain size : %d\n", work32[1]);
1396 seq_printf(seq, "SGL chain size target : %d\n", work32[2]);
1397 seq_printf(seq, "SGL frag count : %d\n", work16[6]);
1398 seq_printf(seq, "Max SGL frag count : %d\n", work16[7]);
1399 seq_printf(seq, "SGL frag count target : %d\n", work16[8]);
1400
1401/* FIXME
1402 if (d->i2oversion == 0x02)
1403 {
1404*/
1405 seq_printf(seq, "SGL data alignment : %d\n", work16[8]);
1406 seq_printf(seq, "SGL addr limit : %d\n", work8[20]);
1407 seq_printf(seq, "SGL addr sizes supported : ");
1408 if (work8[21] & 0x01)
1409 seq_printf(seq, "32 bit ");
1410 if (work8[21] & 0x02)
1411 seq_printf(seq, "64 bit ");
1412 if (work8[21] & 0x04)
1413 seq_printf(seq, "96 bit ");
1414 if (work8[21] & 0x08)
1415 seq_printf(seq, "128 bit ");
1416 seq_printf(seq, "\n");
1417/*
1418 }
1419*/
1420
1421 return 0;
1422}
1423
1424/* Generic group F200h - Sensors (scalar) */
1425static int i2o_seq_show_sensors(struct seq_file *seq, void *v)
1426{
1427 struct i2o_device *d = (struct i2o_device *)seq->private;
1428 int token;
1429
1430 struct {
1431 u16 sensor_instance;
1432 u8 component;
1433 u16 component_instance;
1434 u8 sensor_class;
1435 u8 sensor_type;
1436 u8 scaling_exponent;
1437 u32 actual_reading;
1438 u32 minimum_reading;
1439 u32 low2lowcat_treshold;
1440 u32 lowcat2low_treshold;
1441 u32 lowwarn2low_treshold;
1442 u32 low2lowwarn_treshold;
1443 u32 norm2lowwarn_treshold;
1444 u32 lowwarn2norm_treshold;
1445 u32 nominal_reading;
1446 u32 hiwarn2norm_treshold;
1447 u32 norm2hiwarn_treshold;
1448 u32 high2hiwarn_treshold;
1449 u32 hiwarn2high_treshold;
1450 u32 hicat2high_treshold;
1451 u32 hi2hicat_treshold;
1452 u32 maximum_reading;
1453 u8 sensor_state;
1454 u16 event_enable;
1455 } result;
1456
1457 token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result));
1458
1459 if (token < 0) {
1460 i2o_report_query_status(seq, token,
1461 "0xF200 Sensors (optional)");
1462 return 0;
1463 }
1464
1465 seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance);
1466
1467 seq_printf(seq, "Component : %d = ", result.component);
1468 switch (result.component) {
1469 case 0:
1470 seq_printf(seq, "Other");
1471 break;
1472 case 1:
1473 seq_printf(seq, "Planar logic Board");
1474 break;
1475 case 2:
1476 seq_printf(seq, "CPU");
1477 break;
1478 case 3:
1479 seq_printf(seq, "Chassis");
1480 break;
1481 case 4:
1482 seq_printf(seq, "Power Supply");
1483 break;
1484 case 5:
1485 seq_printf(seq, "Storage");
1486 break;
1487 case 6:
1488 seq_printf(seq, "External");
1489 break;
1490 }
1491 seq_printf(seq, "\n");
1492
1493 seq_printf(seq, "Component instance : %d\n",
1494 result.component_instance);
1495 seq_printf(seq, "Sensor class : %s\n",
1496 result.sensor_class ? "Analog" : "Digital");
1497
1498 seq_printf(seq, "Sensor type : %d = ", result.sensor_type);
1499 switch (result.sensor_type) {
1500 case 0:
1501 seq_printf(seq, "Other\n");
1502 break;
1503 case 1:
1504 seq_printf(seq, "Thermal\n");
1505 break;
1506 case 2:
1507 seq_printf(seq, "DC voltage (DC volts)\n");
1508 break;
1509 case 3:
1510 seq_printf(seq, "AC voltage (AC volts)\n");
1511 break;
1512 case 4:
1513 seq_printf(seq, "DC current (DC amps)\n");
1514 break;
1515 case 5:
1516		seq_printf(seq, "AC current (AC amps)\n");
1517 break;
1518 case 6:
1519 seq_printf(seq, "Door open\n");
1520 break;
1521 case 7:
1522 seq_printf(seq, "Fan operational\n");
1523 break;
1524 }
1525
1526 seq_printf(seq, "Scaling exponent : %d\n",
1527 result.scaling_exponent);
1528 seq_printf(seq, "Actual reading : %d\n", result.actual_reading);
1529 seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading);
1530	seq_printf(seq, "Low2LowCat threshold : %d\n",
1531		   result.low2lowcat_treshold);
1532	seq_printf(seq, "LowCat2Low threshold : %d\n",
1533		   result.lowcat2low_treshold);
1534	seq_printf(seq, "LowWarn2Low threshold : %d\n",
1535		   result.lowwarn2low_treshold);
1536	seq_printf(seq, "Low2LowWarn threshold : %d\n",
1537		   result.low2lowwarn_treshold);
1538	seq_printf(seq, "Norm2LowWarn threshold : %d\n",
1539		   result.norm2lowwarn_treshold);
1540	seq_printf(seq, "LowWarn2Norm threshold : %d\n",
1541		   result.lowwarn2norm_treshold);
1542	seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading);
1543	seq_printf(seq, "HiWarn2Norm threshold : %d\n",
1544		   result.hiwarn2norm_treshold);
1545	seq_printf(seq, "Norm2HiWarn threshold : %d\n",
1546		   result.norm2hiwarn_treshold);
1547	seq_printf(seq, "High2HiWarn threshold : %d\n",
1548		   result.high2hiwarn_treshold);
1549	seq_printf(seq, "HiWarn2High threshold : %d\n",
1550		   result.hiwarn2high_treshold);
1551	seq_printf(seq, "HiCat2High threshold : %d\n",
1552		   result.hicat2high_treshold);
1553	seq_printf(seq, "High2HiCat threshold : %d\n",
1554		   result.hi2hicat_treshold);
1555 seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading);
1556
1557 seq_printf(seq, "Sensor state : %d = ", result.sensor_state);
1558 switch (result.sensor_state) {
1559 case 0:
1560 seq_printf(seq, "Normal\n");
1561 break;
1562 case 1:
1563 seq_printf(seq, "Abnormal\n");
1564 break;
1565 case 2:
1566 seq_printf(seq, "Unknown\n");
1567 break;
1568 case 3:
1569 seq_printf(seq, "Low Catastrophic (LoCat)\n");
1570 break;
1571 case 4:
1572 seq_printf(seq, "Low (Low)\n");
1573 break;
1574 case 5:
1575 seq_printf(seq, "Low Warning (LoWarn)\n");
1576 break;
1577 case 6:
1578 seq_printf(seq, "High Warning (HiWarn)\n");
1579 break;
1580 case 7:
1581 seq_printf(seq, "High (High)\n");
1582 break;
1583 case 8:
1584 seq_printf(seq, "High Catastrophic (HiCat)\n");
1585 break;
1586 }
1587
1588 seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable);
1589 seq_printf(seq, " [%s] Operational state change. \n",
1590 (result.event_enable & 0x01) ? "+" : "-");
1591 seq_printf(seq, " [%s] Low catastrophic. \n",
1592 (result.event_enable & 0x02) ? "+" : "-");
1593 seq_printf(seq, " [%s] Low reading. \n",
1594 (result.event_enable & 0x04) ? "+" : "-");
1595 seq_printf(seq, " [%s] Low warning. \n",
1596 (result.event_enable & 0x08) ? "+" : "-");
1597 seq_printf(seq,
1598 " [%s] Change back to normal from out of range state. \n",
1599 (result.event_enable & 0x10) ? "+" : "-");
1600 seq_printf(seq, " [%s] High warning. \n",
1601 (result.event_enable & 0x20) ? "+" : "-");
1602 seq_printf(seq, " [%s] High reading. \n",
1603 (result.event_enable & 0x40) ? "+" : "-");
1604 seq_printf(seq, " [%s] High catastrophic. \n",
1605 (result.event_enable & 0x80) ? "+" : "-");
1606
1607 return 0;
1608}
1609
1610static int i2o_seq_open_hrt(struct inode *inode, struct file *file)
1611{
1612 return single_open(file, i2o_seq_show_hrt, PDE(inode)->data);
1613};
1614
1615static int i2o_seq_open_lct(struct inode *inode, struct file *file)
1616{
1617 return single_open(file, i2o_seq_show_lct, PDE(inode)->data);
1618};
1619
1620static int i2o_seq_open_status(struct inode *inode, struct file *file)
1621{
1622 return single_open(file, i2o_seq_show_status, PDE(inode)->data);
1623};
1624
1625static int i2o_seq_open_hw(struct inode *inode, struct file *file)
1626{
1627 return single_open(file, i2o_seq_show_hw, PDE(inode)->data);
1628};
1629
1630static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file)
1631{
1632 return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data);
1633};
1634
1635static int i2o_seq_open_driver_store(struct inode *inode, struct file *file)
1636{
1637 return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data);
1638};
1639
1640static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file)
1641{
1642 return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data);
1643};
1644
1645static int i2o_seq_open_groups(struct inode *inode, struct file *file)
1646{
1647 return single_open(file, i2o_seq_show_groups, PDE(inode)->data);
1648};
1649
1650static int i2o_seq_open_phys_device(struct inode *inode, struct file *file)
1651{
1652 return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data);
1653};
1654
1655static int i2o_seq_open_claimed(struct inode *inode, struct file *file)
1656{
1657 return single_open(file, i2o_seq_show_claimed, PDE(inode)->data);
1658};
1659
1660static int i2o_seq_open_users(struct inode *inode, struct file *file)
1661{
1662 return single_open(file, i2o_seq_show_users, PDE(inode)->data);
1663};
1664
1665static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file)
1666{
1667 return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data);
1668};
1669
1670static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file)
1671{
1672 return single_open(file, i2o_seq_show_authorized_users,
1673 PDE(inode)->data);
1674};
1675
1676static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file)
1677{
1678 return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data);
1679};
1680
1681static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file)
1682{
1683 return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data);
1684};
1685
1686static int i2o_seq_open_uinfo(struct inode *inode, struct file *file)
1687{
1688 return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data);
1689};
1690
1691static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file)
1692{
1693 return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data);
1694};
1695
1696static int i2o_seq_open_sensors(struct inode *inode, struct file *file)
1697{
1698 return single_open(file, i2o_seq_show_sensors, PDE(inode)->data);
1699};
1700
1701static int i2o_seq_open_dev_name(struct inode *inode, struct file *file)
1702{
1703 return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data);
1704};
1705
1706static struct file_operations i2o_seq_fops_lct = {
1707 .open = i2o_seq_open_lct,
1708 .read = seq_read,
1709 .llseek = seq_lseek,
1710 .release = single_release,
1711};
1712
1713static struct file_operations i2o_seq_fops_hrt = {
1714 .open = i2o_seq_open_hrt,
1715 .read = seq_read,
1716 .llseek = seq_lseek,
1717 .release = single_release,
1718};
1719
1720static struct file_operations i2o_seq_fops_status = {
1721 .open = i2o_seq_open_status,
1722 .read = seq_read,
1723 .llseek = seq_lseek,
1724 .release = single_release,
1725};
1726
1727static struct file_operations i2o_seq_fops_hw = {
1728 .open = i2o_seq_open_hw,
1729 .read = seq_read,
1730 .llseek = seq_lseek,
1731 .release = single_release,
1732};
1733
1734static struct file_operations i2o_seq_fops_ddm_table = {
1735 .open = i2o_seq_open_ddm_table,
1736 .read = seq_read,
1737 .llseek = seq_lseek,
1738 .release = single_release,
1739};
1740
1741static struct file_operations i2o_seq_fops_driver_store = {
1742 .open = i2o_seq_open_driver_store,
1743 .read = seq_read,
1744 .llseek = seq_lseek,
1745 .release = single_release,
1746};
1747
1748static struct file_operations i2o_seq_fops_drivers_stored = {
1749 .open = i2o_seq_open_drivers_stored,
1750 .read = seq_read,
1751 .llseek = seq_lseek,
1752 .release = single_release,
1753};
1754
1755static struct file_operations i2o_seq_fops_groups = {
1756 .open = i2o_seq_open_groups,
1757 .read = seq_read,
1758 .llseek = seq_lseek,
1759 .release = single_release,
1760};
1761
1762static struct file_operations i2o_seq_fops_phys_device = {
1763 .open = i2o_seq_open_phys_device,
1764 .read = seq_read,
1765 .llseek = seq_lseek,
1766 .release = single_release,
1767};
1768
1769static struct file_operations i2o_seq_fops_claimed = {
1770 .open = i2o_seq_open_claimed,
1771 .read = seq_read,
1772 .llseek = seq_lseek,
1773 .release = single_release,
1774};
1775
1776static struct file_operations i2o_seq_fops_users = {
1777 .open = i2o_seq_open_users,
1778 .read = seq_read,
1779 .llseek = seq_lseek,
1780 .release = single_release,
1781};
1782
1783static struct file_operations i2o_seq_fops_priv_msgs = {
1784 .open = i2o_seq_open_priv_msgs,
1785 .read = seq_read,
1786 .llseek = seq_lseek,
1787 .release = single_release,
1788};
1789
1790static struct file_operations i2o_seq_fops_authorized_users = {
1791 .open = i2o_seq_open_authorized_users,
1792 .read = seq_read,
1793 .llseek = seq_lseek,
1794 .release = single_release,
1795};
1796
1797static struct file_operations i2o_seq_fops_dev_name = {
1798 .open = i2o_seq_open_dev_name,
1799 .read = seq_read,
1800 .llseek = seq_lseek,
1801 .release = single_release,
1802};
1803
1804static struct file_operations i2o_seq_fops_dev_identity = {
1805 .open = i2o_seq_open_dev_identity,
1806 .read = seq_read,
1807 .llseek = seq_lseek,
1808 .release = single_release,
1809};
1810
1811static struct file_operations i2o_seq_fops_ddm_identity = {
1812 .open = i2o_seq_open_ddm_identity,
1813 .read = seq_read,
1814 .llseek = seq_lseek,
1815 .release = single_release,
1816};
1817
1818static struct file_operations i2o_seq_fops_uinfo = {
1819 .open = i2o_seq_open_uinfo,
1820 .read = seq_read,
1821 .llseek = seq_lseek,
1822 .release = single_release,
1823};
1824
1825static struct file_operations i2o_seq_fops_sgl_limits = {
1826 .open = i2o_seq_open_sgl_limits,
1827 .read = seq_read,
1828 .llseek = seq_lseek,
1829 .release = single_release,
1830};
1831
1832static struct file_operations i2o_seq_fops_sensors = {
1833 .open = i2o_seq_open_sensors,
1834 .read = seq_read,
1835 .llseek = seq_lseek,
1836 .release = single_release,
1837};
1838
1839/*
1840 * IOP specific entries...write field just in case someone
1841 * ever wants one.
1842 */
1843static i2o_proc_entry i2o_proc_generic_iop_entries[] = {
1844 {"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt},
1845 {"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct},
1846 {"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status},
1847 {"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw},
1848 {"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table},
1849 {"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store},
1850 {"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored},
1851 {NULL, 0, NULL}
1852};
1853
1854/*
1855 * Device specific entries
1856 */
1857static i2o_proc_entry generic_dev_entries[] = {
1858 {"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups},
1859 {"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device},
1860 {"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed},
1861 {"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users},
1862 {"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs},
1863 {"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users},
1864 {"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity},
1865 {"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity},
1866 {"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo},
1867 {"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits},
1868 {"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors},
1869 {NULL, 0, NULL}
1870};
1871
1872/*
1873 * Storage unit specific entries (SCSI Periph, BS) with device names
1874 */
1875static i2o_proc_entry rbs_dev_entries[] = {
1876 {"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name},
1877 {NULL, 0, NULL}
1878};
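/*
 * Each table above ends with the {NULL, 0, NULL} sentinel that
 * i2o_proc_create_entries() below stops on; a new /proc file would be added
 * by inserting a row before the sentinel, e.g. (names illustrative only):
 *
 *	{"my_entry", S_IFREG | S_IRUGO, &i2o_seq_fops_my_entry},
 */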
1879
1880/**
1881 * i2o_proc_create_entries - Creates proc dir entries
1882 * @dir: proc dir entry under which the entries should be placed
1883 * @i2o_pe: pointer to the entries which should be added
1884 * @data: pointer to I2O controller or device
1885 *
1886 * Create proc dir entries for an I2O controller or I2O device.
1887 *
1888 * Returns 0 on success or negative error code on failure.
1889 */
1890static int i2o_proc_create_entries(struct proc_dir_entry *dir,
1891 i2o_proc_entry * i2o_pe, void *data)
1892{
1893 struct proc_dir_entry *tmp;
1894
1895 while (i2o_pe->name) {
1896 tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir);
1897 if (!tmp)
1898 return -1;
1899
1900 tmp->data = data;
1901 tmp->proc_fops = i2o_pe->fops;
1902
1903 i2o_pe++;
1904 }
1905
1906 return 0;
1907}
1908
1909/**
1910 * i2o_proc_subdir_remove - Remove child entries from a proc entry
1911 * @dir: proc dir entry from which the children should be removed
1912 *
1913 * Iterate over each i2o proc entry under dir and remove it. If the child
1914 * also has entries, remove them too.
1915 */
1916static void i2o_proc_subdir_remove(struct proc_dir_entry *dir)
1917{
1918 struct proc_dir_entry *pe, *tmp;
1919 pe = dir->subdir;
1920 while (pe) {
1921 tmp = pe->next;
1922 i2o_proc_subdir_remove(pe);
1923 remove_proc_entry(pe->name, dir);
1924 pe = tmp;
1925 }
1926};
1927
1928/**
1929 * i2o_proc_device_add - Add an I2O device to the proc dir
1930 * @dir: proc dir entry to which the device should be added
1931 * @dev: I2O device which should be added
1932 *
1933 * Add an I2O device to the proc dir entry dir and create the entries for
1934 * the device depending on the class of the I2O device.
1935 */
1936static void i2o_proc_device_add(struct proc_dir_entry *dir,
1937 struct i2o_device *dev)
1938{
1939 char buff[10];
1940 struct proc_dir_entry *devdir;
1941 i2o_proc_entry *i2o_pe = NULL;
1942
1943 sprintf(buff, "%03x", dev->lct_data.tid);
1944
1945 osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff);
1946
1947 devdir = proc_mkdir(buff, dir);
1948 if (!devdir) {
1949 osm_warn("Could not allocate procdir!\n");
1950 return;
1951 }
1952
1953 devdir->data = dev;
1954
1955 i2o_proc_create_entries(devdir, generic_dev_entries, dev);
1956
1957 /* Inform core that we want updates about this device's status */
1958 switch (dev->lct_data.class_id) {
1959 case I2O_CLASS_SCSI_PERIPHERAL:
1960 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1961 i2o_pe = rbs_dev_entries;
1962 break;
1963 default:
1964 break;
1965 }
1966 if (i2o_pe)
1967 i2o_proc_create_entries(devdir, i2o_pe, dev);
1968}
1969
1970/**
1971 * i2o_proc_iop_add - Add an I2O controller to the i2o proc tree
1972 * @dir: parent proc dir entry
1973 * @c: I2O controller which should be added
1974 *
1975 * Add the entries to the parent proc dir entry. Each device is also added
1976 * to the controller's proc dir entry.
1977 *
1978 * Returns 0 on success or negative error code on failure.
1979 */
1980static int i2o_proc_iop_add(struct proc_dir_entry *dir,
1981 struct i2o_controller *c)
1982{
1983 struct proc_dir_entry *iopdir;
1984 struct i2o_device *dev;
1985
1986 osm_debug("adding IOP /proc/i2o/%s\n", c->name);
1987
1988 iopdir = proc_mkdir(c->name, dir);
1989 if (!iopdir)
1990 return -1;
1991
1992 iopdir->data = c;
1993
1994 i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c);
1995
1996 list_for_each_entry(dev, &c->devices, list)
1997 i2o_proc_device_add(iopdir, dev);
1998
1999 return 0;
2000}
2001
2002/**
2003 * i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree
2004 * @dir: parent proc dir entry
2005 * @c: I2O controller which should be removed
2006 *
2007 * Iterate over each i2o proc entry and search for controller c. If it is
2008 * found, remove it from the tree.
2009 */
2010static void i2o_proc_iop_remove(struct proc_dir_entry *dir,
2011 struct i2o_controller *c)
2012{
2013 struct proc_dir_entry *pe, *tmp;
2014
2015 pe = dir->subdir;
2016 while (pe) {
2017 tmp = pe->next;
2018 if (pe->data == c) {
2019 i2o_proc_subdir_remove(pe);
2020 remove_proc_entry(pe->name, dir);
2021 }
2022 osm_debug("removing IOP /proc/i2o/%s\n", c->name);
2023 pe = tmp;
2024 }
2025}
2026
2027/**
2028 * i2o_proc_fs_create - Create the i2o proc fs.
2029 *
2030 * Iterate over each I2O controller and create the entries for it.
2031 *
2032 * Returns 0 on success or negative error code on failure.
2033 */
2034static int __init i2o_proc_fs_create(void)
2035{
2036 struct i2o_controller *c;
2037
2038 i2o_proc_dir_root = proc_mkdir("i2o", NULL);
2039 if (!i2o_proc_dir_root)
2040 return -1;
2041
2042 i2o_proc_dir_root->owner = THIS_MODULE;
2043
2044 list_for_each_entry(c, &i2o_controllers, list)
2045 i2o_proc_iop_add(i2o_proc_dir_root, c);
2046
2047 return 0;
2048};
2049
2050/**
2051 * i2o_proc_fs_destroy - Clean up all i2o proc entries
2052 *
2053 * Iterate over each I2O controller and remove the entries for it.
2054 *
2055 * Returns 0 on success or negative error code on failure.
2056 */
2057static int __exit i2o_proc_fs_destroy(void)
2058{
2059 struct i2o_controller *c;
2060
2061 list_for_each_entry(c, &i2o_controllers, list)
2062 i2o_proc_iop_remove(i2o_proc_dir_root, c);
2063
2064 remove_proc_entry("i2o", NULL);
2065
2066 return 0;
2067};
2068
2069/**
2070 * i2o_proc_init - Init function for procfs
2071 *
2072 * Registers Proc OSM and creates procfs entries.
2073 *
2074 * Returns 0 on success or negative error code on failure.
2075 */
2076static int __init i2o_proc_init(void)
2077{
2078 int rc;
2079
2080 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
2081
2082 rc = i2o_driver_register(&i2o_proc_driver);
2083 if (rc)
2084 return rc;
2085
2086 rc = i2o_proc_fs_create();
2087 if (rc) {
2088 i2o_driver_unregister(&i2o_proc_driver);
2089 return rc;
2090 }
2091
2092 return 0;
2093};
2094
2095/**
2096 * i2o_proc_exit - Exit function for procfs
2097 *
2098 * Unregisters Proc OSM and removes procfs entries.
2099 */
2100static void __exit i2o_proc_exit(void)
2101{
2102 i2o_driver_unregister(&i2o_proc_driver);
2103 i2o_proc_fs_destroy();
2104};
2105
2106MODULE_AUTHOR("Deepak Saxena");
2107MODULE_LICENSE("GPL");
2108MODULE_DESCRIPTION(OSM_DESCRIPTION);
2109MODULE_VERSION(OSM_VERSION);
2110
2111module_init(i2o_proc_init);
2112module_exit(i2o_proc_exit);
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
new file mode 100644
index 000000000000..43f5875e0be5
--- /dev/null
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -0,0 +1,830 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License as published by the
4 * Free Software Foundation; either version 2, or (at your option) any
5 * later version.
6 *
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
11 *
12 * For the avoidance of doubt the "preferred form" of this code is one which
13 * is in an open non patent encumbered format. Where cryptographic key signing
14 * forms part of the process of creating an executable the information
15 * including keys needed to generate an equivalently functional executable
16 * are deemed to be part of the source code.
17 *
18 * Complications for I2O scsi
19 *
20 * o Each (bus,lun) is a logical device in I2O. We keep a map
21 * table. We spoof failed selection for unmapped units
22 * o Request sense buffers can come back for free.
23 * o Scatter gather is a bit dynamic. We have to investigate at
24 * setup time.
25 * o Some of our resources are dynamically shared. The i2o core
26 * needs a message reservation protocol to avoid swap v net
27 * deadlocking. We need to back off queue requests.
28 *
29 * In general the firmware wants to help. Where its help isn't useful for
30 * performance we just ignore the aid. It's not worth the code in truth.
31 *
32 * Fixes/additions:
33 * Steve Ralston:
34 * Scatter gather now works
35 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
36 * Minor fixes for 2.6.
37 *
38 * To Do:
39 * 64bit cleanups
40 * Fix the resource management problems.
41 */
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/types.h>
46#include <linux/string.h>
47#include <linux/ioport.h>
48#include <linux/jiffies.h>
49#include <linux/interrupt.h>
50#include <linux/timer.h>
51#include <linux/delay.h>
52#include <linux/proc_fs.h>
53#include <linux/prefetch.h>
54#include <linux/pci.h>
55#include <linux/blkdev.h>
56#include <linux/i2o.h>
57
58#include <asm/dma.h>
59#include <asm/system.h>
60#include <asm/io.h>
61#include <asm/atomic.h>
62
63#include <scsi/scsi.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_device.h>
66#include <scsi/scsi_cmnd.h>
67
68#define OSM_NAME "scsi-osm"
69#define OSM_VERSION "$Rev$"
70#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM"
71
72static struct i2o_driver i2o_scsi_driver;
73
74static int i2o_scsi_max_id = 16;
75static int i2o_scsi_max_lun = 8;
76
77struct i2o_scsi_host {
78 struct Scsi_Host *scsi_host; /* pointer to the SCSI host */
79 struct i2o_controller *iop; /* pointer to the I2O controller */
80 struct i2o_device *channel[0]; /* channel->i2o_dev mapping table */
81};
82
83static struct scsi_host_template i2o_scsi_host_template;
84
85#define I2O_SCSI_CAN_QUEUE 4
86
87/* SCSI OSM class handling definition */
88static struct i2o_class_id i2o_scsi_class_id[] = {
89 {I2O_CLASS_SCSI_PERIPHERAL},
90 {I2O_CLASS_END}
91};
92
93static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
94{
95 struct i2o_scsi_host *i2o_shost;
96 struct i2o_device *i2o_dev;
97 struct Scsi_Host *scsi_host;
98 int max_channel = 0;
99 u8 type;
100 int i;
101 size_t size;
102 i2o_status_block *sb;
103
104 list_for_each_entry(i2o_dev, &c->devices, list)
105 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) {
106 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* SCSI bus */
107 max_channel++;
108 }
109
110 if (!max_channel) {
111 osm_warn("no channels found on %s\n", c->name);
112 return ERR_PTR(-EFAULT);
113 }
114
115 size = max_channel * sizeof(struct i2o_device *)
116 + sizeof(struct i2o_scsi_host);
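/*
 * Layout note: struct i2o_scsi_host ends in the zero-length channel[]
 * array declared above, so the per-channel i2o_device pointer table is
 * carved out of the same scsi_host_alloc() private data area whose size
 * is computed here.
 */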
117
118 scsi_host = scsi_host_alloc(&i2o_scsi_host_template, size);
119 if (!scsi_host) {
120 osm_warn("Could not allocate SCSI host\n");
121 return ERR_PTR(-ENOMEM);
122 }
123
124 scsi_host->max_channel = max_channel - 1;
125 scsi_host->max_id = i2o_scsi_max_id;
126 scsi_host->max_lun = i2o_scsi_max_lun;
127 scsi_host->this_id = c->unit;
128
129 sb = c->status_block.virt;
130
131 scsi_host->sg_tablesize = (sb->inbound_frame_size -
132 sizeof(struct i2o_message) / 4 - 6) / 2;
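/*
 * Worked example (illustrative numbers): with a 32-word (128 byte)
 * inbound frame and a 4-word struct i2o_message header this leaves
 * (32 - 4 - 6) / 2 = 11 scatter/gather entries, since the flags word,
 * the 16 byte CDB and the length word take 6 message words and each
 * SG element needs two more (flags/length + address).
 */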
133
134 i2o_shost = (struct i2o_scsi_host *)scsi_host->hostdata;
135 i2o_shost->scsi_host = scsi_host;
136 i2o_shost->iop = c;
137
138 i = 0;
139 list_for_each_entry(i2o_dev, &c->devices, list)
140 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) {
141 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* only SCSI bus */
142 i2o_shost->channel[i++] = i2o_dev;
143
144 if (i >= max_channel)
145 break;
146 }
147
148 return i2o_shost;
149};
150
151/**
152 * i2o_scsi_get_host - Get an I2O SCSI host
153 * @c: I2O controller for which to get the SCSI host
154 *
155 * Return the SCSI host which was set up for the I2O controller when it
156 * was added (see i2o_scsi_notify_controller_add()), as stored in the
157 * controller's driver data.
158 *
159 * Returns pointer to the I2O SCSI host on success or NULL on failure.
160 */
161static struct i2o_scsi_host *i2o_scsi_get_host(struct i2o_controller *c)
162{
163 return c->driver_data[i2o_scsi_driver.context];
164};
165
166/**
167 * i2o_scsi_remove - Remove I2O device from SCSI core
168 * @dev: device which should be removed
169 *
170 * Removes the I2O device from the SCSI core again.
171 *
172 * Returns 0 on success.
173 */
174static int i2o_scsi_remove(struct device *dev)
175{
176 struct i2o_device *i2o_dev = to_i2o_device(dev);
177 struct i2o_controller *c = i2o_dev->iop;
178 struct i2o_scsi_host *i2o_shost;
179 struct scsi_device *scsi_dev;
180
181 i2o_shost = i2o_scsi_get_host(c);
182
183 shost_for_each_device(scsi_dev, i2o_shost->scsi_host)
184 if (scsi_dev->hostdata == i2o_dev) {
185 scsi_remove_device(scsi_dev);
186 scsi_device_put(scsi_dev);
187 break;
188 }
189
190 return 0;
191};
192
193/**
194 * i2o_scsi_probe - verify if dev is an I2O SCSI device and install it
195 * @dev: device to verify if it is an I2O SCSI device
196 *
197 * Retrieve channel, id and lun for I2O device. If everything goes well
198 * register the I2O device as SCSI device on the I2O SCSI controller.
199 *
200 * Returns 0 on success or negative error code on failure.
201 */
202static int i2o_scsi_probe(struct device *dev)
203{
204 struct i2o_device *i2o_dev = to_i2o_device(dev);
205 struct i2o_controller *c = i2o_dev->iop;
206 struct i2o_scsi_host *i2o_shost;
207 struct Scsi_Host *scsi_host;
208 struct i2o_device *parent;
209 struct scsi_device *scsi_dev;
210 u32 id;
211 u64 lun;
212 int channel = -1;
213 int i;
214
215 i2o_shost = i2o_scsi_get_host(c);
216 if (!i2o_shost)
217 return -EFAULT;
218
219 scsi_host = i2o_shost->scsi_host;
220
221 if (i2o_parm_field_get(i2o_dev, 0, 3, &id, 4) < 0)
222 return -EFAULT;
223
224 if (id >= scsi_host->max_id) {
225 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id,
226 scsi_host->max_id);
227 return -EFAULT;
228 }
229
230 if (i2o_parm_field_get(i2o_dev, 0, 4, &lun, 8) < 0)
231 return -EFAULT;
232 if (lun >= scsi_host->max_lun) {
233 osm_warn("SCSI device lun (%d) >= max_lun of I2O host (%d)",
234 (unsigned int)lun, scsi_host->max_lun);
235 return -EFAULT;
236 }
237
238 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
239 if (!parent) {
240 osm_warn("can not find parent of device %03x\n",
241 i2o_dev->lct_data.tid);
242 return -EFAULT;
243 }
244
245 for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++)
246 if (i2o_shost->channel[i] == parent)
247 channel = i;
248
249 if (channel == -1) {
250 osm_warn("can not find channel of device %03x\n",
251 i2o_dev->lct_data.tid);
252 return -EFAULT;
253 }
254
255 scsi_dev =
256 __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev);
257
258 if (!scsi_dev) {
259 osm_warn("can not add SCSI device %03x\n",
260 i2o_dev->lct_data.tid);
261 return -EFAULT;
262 }
263
264 osm_debug("added new SCSI device %03x (channel: %d, id: %d, lun: %d)\n",
265 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun);
266
267 return 0;
268};
269
270static const char *i2o_scsi_info(struct Scsi_Host *SChost)
271{
272 struct i2o_scsi_host *hostdata;
273 hostdata = (struct i2o_scsi_host *)SChost->hostdata;
274 return hostdata->iop->name;
275}
276
277/**
278 * i2o_scsi_reply - SCSI OSM message reply handler
279 * @c: controller issuing the reply
280 * @m: message id for flushing
281 * @msg: the message from the controller
282 *
283 * Process reply messages (interrupts, in normal SCSI controller terms).
284 * We can get a variety of messages to process. The normal path is
285 * scsi command completions. We must also deal with IOP failures,
286 * the reply to a bus reset and the reply to a LUN query.
287 *
288 * Returns 0 on success and if the reply should not be flushed or > 0
289 * on success and if the reply should be flushed. Returns negative error
290 * code on failure and if the reply should be flushed.
291 */
292static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
293 struct i2o_message *msg)
294{
295 struct scsi_cmnd *cmd;
296 struct device *dev;
297 u8 as, ds, st;
298
299 cmd = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
300
301 if (msg->u.head[0] & (1 << 13)) {
302 struct i2o_message __iomem *pmsg; /* preserved message */
303 u32 pm;
304 int err = DID_ERROR;
305
306 pm = le32_to_cpu(msg->body[3]);
307
308 pmsg = i2o_msg_in_to_virt(c, pm);
309
310 osm_err("IOP fail.\n");
311 osm_err("From %d To %d Cmd %d.\n",
312 (msg->u.head[1] >> 12) & 0xFFF,
313 msg->u.head[1] & 0xFFF, msg->u.head[1] >> 24);
314 osm_err("Failure Code %d.\n", msg->body[0] >> 24);
315 if (msg->body[0] & (1 << 16))
316 osm_err("Format error.\n");
317 if (msg->body[0] & (1 << 17))
318 osm_err("Path error.\n");
319 if (msg->body[0] & (1 << 18))
320 osm_err("Path State.\n");
321 if (msg->body[0] & (1 << 18))
322 {
323 osm_err("Congestion.\n");
324 err = DID_BUS_BUSY;
325 }
326
327 osm_debug("Failing message is %p.\n", pmsg);
328
329 cmd = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt));
330 if (!cmd)
331 return 1;
332
333 cmd->result = err << 16;
334 cmd->scsi_done(cmd);
335
336 /* Now flush the message by making it a NOP */
337 i2o_msg_nop(c, pm);
338
339 return 1;
340 }
341
342 /*
343 * Low byte is device status, next is adapter status,
344 * (then one byte reserved), then request status.
345 */
346 ds = (u8) le32_to_cpu(msg->body[0]);
347 as = (u8) (le32_to_cpu(msg->body[0]) >> 8);
348 st = (u8) (le32_to_cpu(msg->body[0]) >> 24);
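/*
 * Example: body[0] == 0x0A000102 unpacks to ds == 0x02, as == 0x01 and
 * st == 0x0A; byte 2 is the reserved byte mentioned above.
 */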
349
350 /*
351 * Is this a control request coming back - eg an abort ?
352 */
353
354 if (!cmd) {
355 if (st)
356 osm_warn("SCSI abort: %08X", le32_to_cpu(msg->body[0]));
357 osm_info("SCSI abort completed.\n");
358 return -EFAULT;
359 }
360
361 osm_debug("Completed %ld\n", cmd->serial_number);
362
363 if (st) {
364 u32 count, error;
365 /* An error has occurred */
366
367 switch (st) {
368 case 0x06:
369 count = le32_to_cpu(msg->body[1]);
370 if (count < cmd->underflow) {
371 int i;
372
373 osm_err("SCSI underflow 0x%08X 0x%08X\n", count,
374 cmd->underflow);
375 osm_debug("Cmd: ");
376 for (i = 0; i < 15; i++)
377 pr_debug("%02X ", cmd->cmnd[i]);
378 pr_debug(".\n");
379 cmd->result = (DID_ERROR << 16);
380 }
381 break;
382
383 default:
384 error = le32_to_cpu(msg->body[0]);
385
386 osm_err("SCSI error %08x\n", error);
387
388 if ((error & 0xff) == 0x02 /*CHECK_CONDITION */ ) {
389 int i;
390 u32 len = sizeof(cmd->sense_buffer);
391 len = (len > 40) ? 40 : len;
392 // Copy over the sense data
393 memcpy(cmd->sense_buffer, (void *)&msg->body[3],
394 len);
395 for (i = 0; i < len; i++)
396 osm_info("%02x\n",
397 cmd->sense_buffer[i]);
398 if (cmd->sense_buffer[0] == 0x70
399 && cmd->sense_buffer[2] == DATA_PROTECT) {
400 /* This is to handle an array failed */
401 cmd->result = (DID_TIME_OUT << 16);
402 printk(KERN_WARNING "%s: SCSI Data "
403 "Protect-Device (%d,%d,%d) "
404 "hba_status=0x%x, dev_status="
405 "0x%x, cmd=0x%x\n", c->name,
406 (u32) cmd->device->channel,
407 (u32) cmd->device->id,
408 (u32) cmd->device->lun,
409 (error >> 8) & 0xff,
410 error & 0xff, cmd->cmnd[0]);
411 } else
412 cmd->result = (DID_ERROR << 16);
413
414 break;
415 }
416
417 switch (as) {
418 case 0x0E:
419 /* SCSI Reset */
420 cmd->result = DID_RESET << 16;
421 break;
422
423 case 0x0F:
424 cmd->result = DID_PARITY << 16;
425 break;
426
427 default:
428 cmd->result = DID_ERROR << 16;
429 break;
430 }
431
432 break;
433 }
434
435 cmd->scsi_done(cmd);
436 return 1;
437 }
438
439 cmd->result = DID_OK << 16 | ds;
440
441 cmd->scsi_done(cmd);
442
443 dev = &c->pdev->dev;
444 if (cmd->use_sg)
445 dma_unmap_sg(dev, (struct scatterlist *)cmd->buffer,
446 cmd->use_sg, cmd->sc_data_direction);
447 else if (cmd->request_bufflen)
448 dma_unmap_single(dev, (dma_addr_t) ((long)cmd->SCp.ptr),
449 cmd->request_bufflen, cmd->sc_data_direction);
450
451 return 1;
452};
453
454/**
455 * i2o_scsi_notify_controller_add - Retrieve notifications of added
456 * controllers
457 * @c: the controller which was added
458 *
459 * If an I2O controller is added, we catch the notification to add a
460 * corresponding Scsi_Host.
461 */
462static void i2o_scsi_notify_controller_add(struct i2o_controller *c)
463{
464 struct i2o_scsi_host *i2o_shost;
465 int rc;
466
467 i2o_shost = i2o_scsi_host_alloc(c);
468 if (IS_ERR(i2o_shost)) {
469 osm_err("Could not initialize SCSI host\n");
470 return;
471 }
472
473 rc = scsi_add_host(i2o_shost->scsi_host, &c->device);
474 if (rc) {
475 osm_err("Could not add SCSI host\n");
476 scsi_host_put(i2o_shost->scsi_host);
477 return;
478 }
479
480 c->driver_data[i2o_scsi_driver.context] = i2o_shost;
481
482 osm_debug("new I2O SCSI host added\n");
483};
484
485/**
486 * i2o_scsi_notify_controller_remove - Retrieve notifications of removed
487 * controllers
488 * @c: the controller which was removed
489 *
490 * If an I2O controller is removed, we catch the notification to remove the
491 * corresponding Scsi_Host.
492 */
493static void i2o_scsi_notify_controller_remove(struct i2o_controller *c)
494{
495 struct i2o_scsi_host *i2o_shost;
496 i2o_shost = i2o_scsi_get_host(c);
497 if (!i2o_shost)
498 return;
499
500 c->driver_data[i2o_scsi_driver.context] = NULL;
501
502 scsi_remove_host(i2o_shost->scsi_host);
503 scsi_host_put(i2o_shost->scsi_host);
504 pr_info("I2O SCSI host removed\n");
505};
506
507/* SCSI OSM driver struct */
508static struct i2o_driver i2o_scsi_driver = {
509 .name = OSM_NAME,
510 .reply = i2o_scsi_reply,
511 .classes = i2o_scsi_class_id,
512 .notify_controller_add = i2o_scsi_notify_controller_add,
513 .notify_controller_remove = i2o_scsi_notify_controller_remove,
514 .driver = {
515 .probe = i2o_scsi_probe,
516 .remove = i2o_scsi_remove,
517 },
518};
519
520/**
521 * i2o_scsi_queuecommand - queue a SCSI command
522 * @SCpnt: scsi command pointer
523 * @done: callback for completion
524 *
525 * Issue a scsi command asynchronously. Return 0 on success or 1 if
526 * we hit an error (normally message queue congestion). The only
527 * minor complication here is that I2O deals with the device addressing
528 * so we have to map the bus/dev/lun back to an I2O handle as well
529 * as faking absent devices ourself.
530 *
531 * Locks: takes the controller lock on error path only
532 */
533
534static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
535 void (*done) (struct scsi_cmnd *))
536{
537 struct i2o_controller *c;
538 struct Scsi_Host *host;
539 struct i2o_device *i2o_dev;
540 struct device *dev;
541 int tid;
542 struct i2o_message __iomem *msg;
543 u32 m;
544 u32 scsi_flags, sg_flags;
545 u32 __iomem *mptr;
546 u32 __iomem *lenptr;
547 u32 len, reqlen;
548 int i;
549
550 /*
551 * Do the incoming paperwork
552 */
553
554 i2o_dev = SCpnt->device->hostdata;
555 SCpnt->scsi_done = done;
556
557 if (unlikely(!i2o_dev)) {
558 osm_warn("no I2O device in request\n");
559 SCpnt->result = DID_NO_CONNECT << 16;
560 done(SCpnt);
561 return 0;
562 }
563
564 host = SCpnt->device->host;
565 c = i2o_dev->iop;
566 dev = &c->pdev->dev;
567
568 tid = i2o_dev->lct_data.tid;
569
570 osm_debug("qcmd: Tid = %03x\n", tid);
571 osm_debug("Real scsi messages.\n");
572
573 /*
574 * Obtain an I2O message. If there are none free then
575 * throw it back to the scsi layer
576 */
577
578 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
579 if (m == I2O_QUEUE_EMPTY)
580 return SCSI_MLQUEUE_HOST_BUSY;
581
582 /*
583 * Put together a scsi execscb message
584 */
585
586 len = SCpnt->request_bufflen;
587
588 switch (SCpnt->sc_data_direction) {
589 case PCI_DMA_NONE:
590 scsi_flags = 0x00000000; // DATA NO XFER
591 sg_flags = 0x00000000;
592 break;
593
594 case PCI_DMA_TODEVICE:
595 scsi_flags = 0x80000000; // DATA OUT (iop-->dev)
596 sg_flags = 0x14000000;
597 break;
598
599 case PCI_DMA_FROMDEVICE:
600 scsi_flags = 0x40000000; // DATA IN (iop<--dev)
601 sg_flags = 0x10000000;
602 break;
603
604 default:
605 /* Unknown - kill the command */
606 SCpnt->result = DID_NO_CONNECT << 16;
607 done(SCpnt);
608 return 0;
609 }
610
611 writel(I2O_CMD_SCSI_EXEC << 24 | HOST_TID << 12 | tid, &msg->u.head[1]);
612 writel(i2o_scsi_driver.context, &msg->u.s.icntxt);
613
614 /* We want the SCSI control block back */
615 writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt);
616
617 /* LSI_920_PCI_QUIRK
618 *
619 * Intermittent observations of msg frame word data corruption
620 * observed on msg[4] after:
621 * WRITE, READ-MODIFY-WRITE
622 * operations. 19990606 -sralston
623 *
624 * (Hence we build this word via tag. It's good practice anyway;
625 * we don't want fetches over PCI needlessly)
626 */
627
628 /* Attach tags to the devices */
629 /*
630 if(SCpnt->device->tagged_supported) {
631 if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
632 scsi_flags |= 0x01000000;
633 else if(SCpnt->tag == ORDERED_QUEUE_TAG)
634 scsi_flags |= 0x01800000;
635 }
636 */
637
638 /* Direction, disconnect ok, tag, CDBLen */
639 writel(scsi_flags | 0x20200000 | SCpnt->cmd_len, &msg->body[0]);
640
641 mptr = &msg->body[1];
642
643 /* Write SCSI command into the message - always 16 byte block */
644 memcpy_toio(mptr, SCpnt->cmnd, 16);
645 mptr += 4;
646 lenptr = mptr++; /* Remember me - fill in when we know */
647
648 reqlen = 12; // SINGLE SGE
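/*
 * 12 words = 4 header words + the flags/CDB-length word + four CDB
 * words + the total-length word + one two-word SG element, which is
 * consistent with the SGL_OFFSET_10 written into head[0] below.
 */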
649
650 /* Now fill in the SGList and command */
651 if (SCpnt->use_sg) {
652 struct scatterlist *sg;
653 int sg_count;
654
655 sg = SCpnt->request_buffer;
656 len = 0;
657
658 sg_count = dma_map_sg(dev, sg, SCpnt->use_sg,
659 SCpnt->sc_data_direction);
660
661 if (unlikely(sg_count <= 0))
662 return -ENOMEM;
663
664 for (i = SCpnt->use_sg; i > 0; i--) {
665 if (i == 1)
666 sg_flags |= 0xC0000000;
667 writel(sg_flags | sg_dma_len(sg), mptr++);
668 writel(sg_dma_address(sg), mptr++);
669 len += sg_dma_len(sg);
670 sg++;
671 }
672
673 reqlen = mptr - &msg->u.head[0];
674 writel(len, lenptr);
675 } else {
676 len = SCpnt->request_bufflen;
677
678 writel(len, lenptr);
679
680 if (len > 0) {
681 dma_addr_t dma_addr;
682
683 dma_addr = dma_map_single(dev, SCpnt->request_buffer,
684 SCpnt->request_bufflen,
685 SCpnt->sc_data_direction);
686 if (!dma_addr)
687 return -ENOMEM;
688
689 SCpnt->SCp.ptr = (void *)(unsigned long)dma_addr;
690 sg_flags |= 0xC0000000;
691 writel(sg_flags | SCpnt->request_bufflen, mptr++);
692 writel(dma_addr, mptr++);
693 } else
694 reqlen = 9;
695 }
696
697 /* Stick the headers on */
698 writel(reqlen << 16 | SGL_OFFSET_10, &msg->u.head[0]);
699
700 /* Queue the message */
701 i2o_msg_post(c, m);
702
703 osm_debug("Issued %ld\n", SCpnt->serial_number);
704
705 return 0;
706};
707
708/**
709 * i2o_scsi_abort - abort a running command
710 * @SCpnt: command to abort
711 *
712 * Ask the I2O controller to abort a command. This is an asynchronous
713 * process and our callback handler will see the command complete with an
714 * aborted message if it succeeds.
715 *
716 * Returns 0 if the command is successfully aborted or negative error code
717 * on failure.
718 */
719static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
720{
721 struct i2o_device *i2o_dev;
722 struct i2o_controller *c;
723 struct i2o_message __iomem *msg;
724 u32 m;
725 int tid;
726 int status = FAILED;
727
728 osm_warn("Aborting command block.\n");
729
730 i2o_dev = SCpnt->device->hostdata;
731 c = i2o_dev->iop;
732 tid = i2o_dev->lct_data.tid;
733
734 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
735 if (m == I2O_QUEUE_EMPTY)
736 return SCSI_MLQUEUE_HOST_BUSY;
737
738 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
739 writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid,
740 &msg->u.head[1]);
741 writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]);
742
743 if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT))
744 status = SUCCESS;
745
746 return status;
747}
748
749/**
750 * i2o_scsi_bios_param - Invent disk geometry
751 * @sdev: scsi device
752 * @dev: block layer device
753 * @capacity: size in sectors
754 * @ip: geometry array
755 *
756 * This is anyone's guess, quite frankly. We use the same rules everyone
757 * else appears to and hope. It seems to work.
758 */
759
760static int i2o_scsi_bios_param(struct scsi_device *sdev,
761 struct block_device *dev, sector_t capacity,
762 int *ip)
763{
764 int size;
765
766 size = capacity;
767 ip[0] = 64; /* heads */
768 ip[1] = 32; /* sectors */
769 if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */
770 ip[0] = 255; /* heads */
771 ip[1] = 63; /* sectors */
772 ip[2] = size / (255 * 63); /* cylinders */
773 }
774 return 0;
775}
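/*
 * Example of the heuristic above: an 8388608 sector (4 GiB) disk gives
 * size >> 11 == 4096, which is > 1024, so the reported geometry becomes
 * 255 heads, 63 sectors and 8388608 / (255 * 63) == 522 cylinders.
 */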
776
777static struct scsi_host_template i2o_scsi_host_template = {
778 .proc_name = OSM_NAME,
779 .name = OSM_DESCRIPTION,
780 .info = i2o_scsi_info,
781 .queuecommand = i2o_scsi_queuecommand,
782 .eh_abort_handler = i2o_scsi_abort,
783 .bios_param = i2o_scsi_bios_param,
784 .can_queue = I2O_SCSI_CAN_QUEUE,
785 .sg_tablesize = 8,
786 .cmd_per_lun = 6,
787 .use_clustering = ENABLE_CLUSTERING,
788};
789
790/**
791 * i2o_scsi_init - SCSI OSM initialization function
792 *
793 * Register SCSI OSM into I2O core.
794 *
795 * Returns 0 on success or negative error code on failure.
796 */
797static int __init i2o_scsi_init(void)
798{
799 int rc;
800
801 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
802
803 /* Register SCSI OSM into I2O core */
804 rc = i2o_driver_register(&i2o_scsi_driver);
805 if (rc) {
806 osm_err("Could not register SCSI driver\n");
807 return rc;
808 }
809
810 return 0;
811};
812
813/**
814 * i2o_scsi_exit - SCSI OSM exit function
815 *
816 * Unregisters SCSI OSM from I2O core.
817 */
818static void __exit i2o_scsi_exit(void)
819{
820 /* Unregister I2O SCSI OSM from I2O core */
821 i2o_driver_unregister(&i2o_scsi_driver);
822};
823
824MODULE_AUTHOR("Red Hat Software");
825MODULE_LICENSE("GPL");
826MODULE_DESCRIPTION(OSM_DESCRIPTION);
827MODULE_VERSION(OSM_VERSION);
828
829module_init(i2o_scsi_init);
830module_exit(i2o_scsi_exit);
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
new file mode 100644
index 000000000000..50c8cedf7a2d
--- /dev/null
+++ b/drivers/message/i2o/iop.c
@@ -0,0 +1,1327 @@
1/*
2 * Functions to handle I2O controllers and I2O message handling
3 *
4 * Copyright (C) 1999-2002 Red Hat Software
5 *
6 * Written by Alan Cox, Building Number Three Ltd
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * A lot of the I2O message side code from this is taken from the
14 * Red Creek RCPCI45 adapter driver by Red Creek Communications
15 *
16 * Fixes/additions:
17 * Philipp Rumpf
18 * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
19 * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20 * Deepak Saxena <deepak@plexity.net>
21 * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22 * Alan Cox <alan@redhat.com>:
23 * Ported to Linux 2.5.
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Minor fixes for 2.6.
26 */
27
28#include <linux/module.h>
29#include <linux/i2o.h>
30#include <linux/delay.h>
31
32#define OSM_VERSION "$Rev$"
33#define OSM_DESCRIPTION "I2O subsystem"
34
35/* global I2O controller list */
36LIST_HEAD(i2o_controllers);
37
38/*
39 * global I2O System Table. Contains information about all the IOPs in the
40 * system. Used to inform IOPs about each other's existence.
41 */
42static struct i2o_dma i2o_systab;
43
44static int i2o_hrt_get(struct i2o_controller *c);
45
46/* Module internal functions from other sources */
47extern struct i2o_driver i2o_exec_driver;
48extern int i2o_exec_lct_get(struct i2o_controller *);
49extern void i2o_device_remove(struct i2o_device *);
50
51extern int __init i2o_driver_init(void);
52extern void __exit i2o_driver_exit(void);
53extern int __init i2o_exec_init(void);
54extern void __exit i2o_exec_exit(void);
55extern int __init i2o_pci_init(void);
56extern void __exit i2o_pci_exit(void);
57extern int i2o_device_init(void);
58extern void i2o_device_exit(void);
59
60/**
61 * i2o_msg_nop - Returns a message which is not used
62 * @c: I2O controller from which the message was created
63 * @m: message which should be returned
64 *
65 * If you fetch a message via i2o_msg_get, and can't use it, you must
66 * return the message with this function. Otherwise the message frame
67 * is lost.
68 */
69void i2o_msg_nop(struct i2o_controller *c, u32 m)
70{
71 struct i2o_message __iomem *msg = c->in_queue.virt + m;
72
73 writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
74 writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
75 &msg->u.head[1]);
76 writel(0, &msg->u.head[2]);
77 writel(0, &msg->u.head[3]);
78 i2o_msg_post(c, m);
79};
80
81/**
82 * i2o_msg_get_wait - obtain an I2O message from the IOP
83 * @c: I2O controller
84 * @msg: pointer to a I2O message pointer
85 * @wait: how long to wait until timeout
86 *
87 * This function waits up to wait seconds for a message slot to be
88 * available.
89 *
90 * On a success the message is returned and the pointer to the message is
91 * set in msg. The returned message is the physical page frame offset
92 * address from the read port (see the i2o spec). If no message is
93 * available returns I2O_QUEUE_EMPTY and msg is left untouched.
94 */
95u32 i2o_msg_get_wait(struct i2o_controller *c, struct i2o_message __iomem **msg,
96 int wait)
97{
98 unsigned long timeout = jiffies + wait * HZ;
99 u32 m;
100
101 while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) {
102 if (time_after(jiffies, timeout)) {
103 pr_debug("%s: Timeout waiting for message frame.\n",
104 c->name);
105 return I2O_QUEUE_EMPTY;
106 }
107 set_current_state(TASK_UNINTERRUPTIBLE);
108 schedule_timeout(1);
109 }
110
111 return m;
112};
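/*
 * Typical caller pattern (sketch): obtain a frame, hand it back with
 * i2o_msg_nop() if it cannot be used, otherwise fill it in and post it:
 *
 *	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
 *	if (m == I2O_QUEUE_EMPTY)
 *		return -ETIMEDOUT;
 *	... build the message in *msg ...
 *	i2o_msg_post(c, m);
 */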
113
114#if BITS_PER_LONG == 64
115/**
116 * i2o_cntxt_list_add - Append a pointer to the context list and return an id
117 * @c: controller to which the context list belong
118 * @ptr: pointer to add to the context list
119 *
120 * Because the context field in I2O is only 32 bits wide, on 64-bit systems
121 * the pointer is too large to fit in the context field. The i2o_cntxt_list
122 * functions therefore map pointers to context fields.
123 *
124 * Returns context id > 0 on success or 0 on failure.
125 */
126u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
127{
128 struct i2o_context_list_element *entry;
129 unsigned long flags;
130
131 if (!ptr)
132 printk(KERN_ERR "%s: couldn't add NULL pointer to context list!"
133 "\n", c->name);
134
135 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
136 if (!entry) {
137 printk(KERN_ERR "%s: Could not allocate memory for context "
138 "list element\n", c->name);
139 return 0;
140 }
141
142 entry->ptr = ptr;
143 entry->timestamp = jiffies;
144 INIT_LIST_HEAD(&entry->list);
145
146 spin_lock_irqsave(&c->context_list_lock, flags);
147
148 if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
149 atomic_inc(&c->context_list_counter);
150
151 entry->context = atomic_read(&c->context_list_counter);
152
153 list_add(&entry->list, &c->context_list);
154
155 spin_unlock_irqrestore(&c->context_list_lock, flags);
156
157 pr_debug("%s: Add context to list %p -> %d\n", c->name, ptr, entry->context);
158
159 return entry->context;
160};
161
162/**
163 * i2o_cntxt_list_remove - Remove a pointer from the context list
164 * @c: controller to which the context list belong
165 * @ptr: pointer which should be removed from the context list
166 *
167 * Removes a previously added pointer from the context list and returns
168 * the matching context id.
169 *
170 * Returns context id on success or 0 on failure.
171 */
172u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr)
173{
174 struct i2o_context_list_element *entry;
175 u32 context = 0;
176 unsigned long flags;
177
178 spin_lock_irqsave(&c->context_list_lock, flags);
179 list_for_each_entry(entry, &c->context_list, list)
180 if (entry->ptr == ptr) {
181 list_del(&entry->list);
182 context = entry->context;
183 kfree(entry);
184 break;
185 }
186 spin_unlock_irqrestore(&c->context_list_lock, flags);
187
188 if (!context)
189 printk(KERN_WARNING "%s: Could not remove nonexistent ptr "
190 "%p\n", c->name, ptr);
191
192 pr_debug("%s: remove ptr from context list %d -> %p\n", c->name,
193 context, ptr);
194
195 return context;
196};
197
198/**
199 * i2o_cntxt_list_get - Get a pointer from the context list and remove it
200 * @c: controller to which the context list belong
201 * @context: context id to which the pointer belong
202 *
203 * Returns the pointer which matches the context id on success or NULL on
204 * failure.
205 */
206void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
207{
208 struct i2o_context_list_element *entry;
209 unsigned long flags;
210 void *ptr = NULL;
211
212 spin_lock_irqsave(&c->context_list_lock, flags);
213 list_for_each_entry(entry, &c->context_list, list)
214 if (entry->context == context) {
215 list_del(&entry->list);
216 ptr = entry->ptr;
217 kfree(entry);
218 break;
219 }
220 spin_unlock_irqrestore(&c->context_list_lock, flags);
221
222 if (!ptr)
223 printk(KERN_WARNING "%s: context id %d not found\n", c->name,
224 context);
225
226 pr_debug("%s: get ptr from context list %d -> %p\n", c->name, context,
227 ptr);
228
229 return ptr;
230};
231
232/**
233 * i2o_cntxt_list_get_ptr - Get a context id from the context list
234 * @c: controller to which the context list belong
235 * @ptr: pointer to which the context id should be fetched
236 *
237 * Returns the context id which matches the pointer on success or 0 on
238 * failure.
239 */
240u32 i2o_cntxt_list_get_ptr(struct i2o_controller * c, void *ptr)
241{
242 struct i2o_context_list_element *entry;
243 u32 context = 0;
244 unsigned long flags;
245
246 spin_lock_irqsave(&c->context_list_lock, flags);
247 list_for_each_entry(entry, &c->context_list, list)
248 if (entry->ptr == ptr) {
249 context = entry->context;
250 break;
251 }
252 spin_unlock_irqrestore(&c->context_list_lock, flags);
253
254 if (!context)
255 printk(KERN_WARNING "%s: Could not find nonexistent ptr "
256 "%p\n", c->name, ptr);
257
258 pr_debug("%s: get context id from context list %p -> %d\n", c->name,
259 ptr, context);
260
261 return context;
262};
263#endif
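/*
 * Typical round trip on a 64-bit build (sketch): an OSM stores its
 * private pointer before posting a message and resolves it again in
 * its reply handler:
 *
 *	writel(i2o_cntxt_list_add(c, ptr), &msg->u.s.tcntxt);
 *	...
 *	ptr = i2o_cntxt_list_get(c, le32_to_cpu(reply->u.s.tcntxt));
 */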
264
265/**
266 * i2o_find_iop - Find an I2O controller by id
267 * @unit: unit number of the I2O controller to search for
268 *
269 * Lookup the I2O controller on the controller list.
270 *
271 * Returns pointer to the I2O controller on success or NULL if not found.
272 */
273struct i2o_controller *i2o_find_iop(int unit)
274{
275 struct i2o_controller *c;
276
277 list_for_each_entry(c, &i2o_controllers, list) {
278 if (c->unit == unit)
279 return c;
280 }
281
282 return NULL;
283};
284
285/**
286 * i2o_iop_find_device - Find an I2O device on an I2O controller
287 * @c: I2O controller where the I2O device hangs on
288 * @tid: TID of the I2O device to search for
289 *
290 * Searches the devices of the I2O controller for a device with TID tid and
291 * returns it.
292 *
293 * Returns a pointer to the I2O device if found, otherwise NULL.
294 */
295struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
296{
297 struct i2o_device *dev;
298
299 list_for_each_entry(dev, &c->devices, list)
300 if (dev->lct_data.tid == tid)
301 return dev;
302
303 return NULL;
304};
305
306/**
307 * i2o_iop_quiesce - quiesce controller
308 * @c: controller
309 *
310 * Quiesce an IOP. Causes IOP to make external operation quiescent
311 * (i2o 'READY' state). Internal operation of the IOP continues normally.
312 *
313 * Returns 0 on success or negative error code on failure.
314 */
315static int i2o_iop_quiesce(struct i2o_controller *c)
316{
317 struct i2o_message __iomem *msg;
318 u32 m;
319 i2o_status_block *sb = c->status_block.virt;
320 int rc;
321
322 i2o_status_get(c);
323
324 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
325 if ((sb->iop_state != ADAPTER_STATE_READY) &&
326 (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
327 return 0;
328
329 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
330 if (m == I2O_QUEUE_EMPTY)
331 return -ETIMEDOUT;
332
333 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
334 writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
335 &msg->u.head[1]);
336
337 /* Long timeout needed for quiesce if lots of devices */
338 if ((rc = i2o_msg_post_wait(c, m, 240)))
339 printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
340 c->name, -rc);
341 else
342 pr_debug("%s: Quiesced.\n", c->name);
343
344 i2o_status_get(c); // Entered READY state
345
346 return rc;
347};
348
349/**
350 * i2o_iop_enable - move controller from ready to OPERATIONAL
351 * @c: I2O controller
352 *
353 * Enable IOP. This allows the IOP to resume external operations and
354 * reverses the effect of a quiesce. Returns zero or an error code if
355 * an error occurs.
356 */
357static int i2o_iop_enable(struct i2o_controller *c)
358{
359 struct i2o_message __iomem *msg;
360 u32 m;
361 i2o_status_block *sb = c->status_block.virt;
362 int rc;
363
364 i2o_status_get(c);
365
366 /* Enable only allowed on READY state */
367 if (sb->iop_state != ADAPTER_STATE_READY)
368 return -EINVAL;
369
370 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
371 if (m == I2O_QUEUE_EMPTY)
372 return -ETIMEDOUT;
373
374 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
375 writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID,
376 &msg->u.head[1]);
377
378 /* How long of a timeout do we need? */
379 if ((rc = i2o_msg_post_wait(c, m, 240)))
380 printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
381 c->name, -rc);
382 else
383 pr_debug("%s: Enabled.\n", c->name);
384
385 i2o_status_get(c); // entered OPERATIONAL state
386
387 return rc;
388};
389
390/**
391 * i2o_iop_quiesce_all - Quiesce all I2O controllers on the system
392 *
393 * Quiesce all I2O controllers which are connected to the system.
394 */
395static inline void i2o_iop_quiesce_all(void)
396{
397 struct i2o_controller *c, *tmp;
398
399 list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
400 if (!c->no_quiesce)
401 i2o_iop_quiesce(c);
402 }
403};
404
405/**
406 * i2o_iop_enable_all - Enables all controllers on the system
407 *
408 * Enables all I2O controllers which are connected to the system.
409 */
410static inline void i2o_iop_enable_all(void)
411{
412 struct i2o_controller *c, *tmp;
413
414 list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
415 i2o_iop_enable(c);
416};
417
418/**
419 * i2o_iop_clear - Bring I2O controller into HOLD state
420 * @c: controller
421 *
422 * Clear an IOP to HOLD state, i.e. terminate external operations, clear all
423 * input queues and prepare for a system restart. IOP's internal operation
424 * continues normally and the outbound queue is alive. The IOP is not
425 * expected to rebuild its LCT.
426 *
427 * Returns 0 on success or negative error code on failure.
428 */
429static int i2o_iop_clear(struct i2o_controller *c)
430{
431 struct i2o_message __iomem *msg;
432 u32 m;
433 int rc;
434
435 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
436 if (m == I2O_QUEUE_EMPTY)
437 return -ETIMEDOUT;
438
439 /* Quiesce all IOPs first */
440 i2o_iop_quiesce_all();
441
442 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
443 writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID,
444 &msg->u.head[1]);
445
446 if ((rc = i2o_msg_post_wait(c, m, 30)))
447 printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
448 c->name, -rc);
449 else
450 pr_debug("%s: Cleared.\n", c->name);
451
452 /* Enable all IOPs */
453 i2o_iop_enable_all();
454
455 i2o_status_get(c);
456
457 return rc;
458}
459
460/**
461 * i2o_iop_reset - reset an I2O controller
462 * @c: controller to reset
463 *
464 * Reset the IOP into INIT state and wait until IOP gets into RESET state.
465 * Terminate all external operations, clear IOP's inbound and outbound
466 * queues, terminate all DDMs, and reload the IOP's operating environment
467 * and all local DDMs. The IOP rebuilds its LCT.
468 */
469static int i2o_iop_reset(struct i2o_controller *c)
470{
471 u8 *status = c->status.virt;
472 struct i2o_message __iomem *msg;
473 u32 m;
474 unsigned long timeout;
475 i2o_status_block *sb = c->status_block.virt;
476 int rc = 0;
477
478 pr_debug("%s: Resetting controller\n", c->name);
479
480 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
481 if (m == I2O_QUEUE_EMPTY)
482 return -ETIMEDOUT;
483
484 memset(status, 0, 8);
485
486 /* Quiesce all IOPs first */
487 i2o_iop_quiesce_all();
488
489 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
490 writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID,
491 &msg->u.head[1]);
492 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
493 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context
494 writel(0, &msg->body[0]);
495 writel(0, &msg->body[1]);
496 writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]);
497 writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]);
498
499 i2o_msg_post(c, m);
500
501 /* Wait for a reply */
502 timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
503 while (!*status) {
504 if (time_after(jiffies, timeout)) {
505 printk(KERN_ERR "%s: IOP reset timeout.\n", c->name);
506 rc = -ETIMEDOUT;
507 goto exit;
508 }
509
510 /* Promise bug */
511 if (status[1] || status[4]) {
512 *status = 0;
513 break;
514 }
515
516 set_current_state(TASK_UNINTERRUPTIBLE);
517 schedule_timeout(1);
518
519 rmb();
520 }
521
522 if (*status == I2O_CMD_IN_PROGRESS) {
523 /*
524 * Once the reset is sent, the IOP goes into the INIT state
525 * which is indeterminate. We need to wait until the IOP
526 * has rebooted before we can let the system talk to
527 * it. We read the inbound Free_List until a message is
528 * available. If we can't read one in the given amount of
529 * time, we assume the IOP could not reboot properly.
530 */
531 pr_debug("%s: Reset in progress, waiting for reboot...\n",
532 c->name);
533
534 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
535 while (m == I2O_QUEUE_EMPTY) {
536 if (time_after(jiffies, timeout)) {
537 printk(KERN_ERR "%s: IOP reset timeout.\n",
538 c->name);
539 rc = -ETIMEDOUT;
540 goto exit;
541 }
542 set_current_state(TASK_UNINTERRUPTIBLE);
543 schedule_timeout(1);
544
545 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
546 }
547 i2o_msg_nop(c, m);
548 }
549
550 /* from here all quiesce commands are safe */
551 c->no_quiesce = 0;
552
553 /* If IopReset was rejected or didn't perform reset, try IopClear */
554 i2o_status_get(c);
555 if (*status == I2O_CMD_REJECTED || sb->iop_state != ADAPTER_STATE_RESET) {
556 printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",
557 c->name);
558 i2o_iop_clear(c);
559 } else
560 pr_debug("%s: Reset completed.\n", c->name);
561
562 exit:
563 /* Enable all IOPs */
564 i2o_iop_enable_all();
565
566 return rc;
567};
568
569/**
570 * i2o_iop_init_outbound_queue - setup the outbound message queue
571 * @c: I2O controller
572 *
573 * Clear and (re)initialize IOP's outbound queue and post the message
574 * frames to the IOP.
575 *
576 * Returns 0 on success or a negative errno code on failure.
577 */
578static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
579{
580 u8 *status = c->status.virt;
581 u32 m;
582 struct i2o_message __iomem *msg;
583 ulong timeout;
584 int i;
585
586 pr_debug("%s: Initializing Outbound Queue...\n", c->name);
587
588 memset(status, 0, 4);
589
590 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
591 if (m == I2O_QUEUE_EMPTY)
592 return -ETIMEDOUT;
593
594 writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]);
595 writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
596 &msg->u.head[1]);
597 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
598 writel(0x0106, &msg->u.s.tcntxt); /* FIXME: why 0x0106, maybe in
599 Spec? */
600 writel(PAGE_SIZE, &msg->body[0]);
601 writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); /* Outbound msg frame
602 size in words and Initcode */
603 writel(0xd0000004, &msg->body[2]);
604 writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]);
605 writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]);
606
607 i2o_msg_post(c, m);
608
609 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
610 while (*status <= I2O_CMD_IN_PROGRESS) {
611 if (time_after(jiffies, timeout)) {
612 printk(KERN_WARNING "%s: Timeout Initializing\n",
613 c->name);
614 return -ETIMEDOUT;
615 }
616 set_current_state(TASK_UNINTERRUPTIBLE);
617 schedule_timeout(1);
618
619 rmb();
620 }
621
622 m = c->out_queue.phys;
623
624 /* Post frames */
625 for (i = 0; i < NMBR_MSG_FRAMES; i++) {
626 i2o_flush_reply(c, m);
627 udelay(1); /* Promise */
628 m += MSG_FRAME_SIZE * 4;
629 }
630
631 return 0;
632}
633
634/**
635 * i2o_iop_send_nop - send a core NOP message
636 * @c: controller
637 *
638 * Send a no-operation message with a reply set to cause no
639 * action either. Needed for bringing up promise controllers.
640 */
641static int i2o_iop_send_nop(struct i2o_controller *c)
642{
643 struct i2o_message __iomem *msg;
644 u32 m = i2o_msg_get_wait(c, &msg, HZ);
645 if (m == I2O_QUEUE_EMPTY)
646 return -ETIMEDOUT;
647 i2o_msg_nop(c, m);
648 return 0;
649}
650
651/**
652 * i2o_iop_activate - Bring controller up to HOLD
653 * @c: controller
654 *
655 * This function brings an I2O controller into HOLD state. The adapter
656 * is reset if necessary and then the queues and resource table are read.
657 *
658 * Returns 0 on success or negative error code on failure.
659 */
660static int i2o_iop_activate(struct i2o_controller *c)
661{
662 struct pci_dev *i960 = NULL;
663 i2o_status_block *sb = c->status_block.virt;
664 int rc;
665
666 if (c->promise) {
667 /* Beat up the hardware first of all */
668 i960 =
669 pci_find_slot(c->pdev->bus->number,
670 PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));
671 if (i960)
672 pci_write_config_word(i960, 0x42, 0);
673
674 /* Follow this sequence precisely or the controller
675 ceases to perform useful functions until reboot */
676 if ((rc = i2o_iop_send_nop(c)))
677 return rc;
678
679 if ((rc = i2o_iop_reset(c)))
680 return rc;
681 }
682
683 /* In INIT state, wait for the inbound queue to initialize (in i2o_status_get) */
684 /* In READY state, get status */
685
686 rc = i2o_status_get(c);
687 if (rc) {
688 printk(KERN_INFO "%s: Unable to obtain status, "
689 "attempting a reset.\n", c->name);
690 if (i2o_iop_reset(c))
691 return rc;
692 }
693
694 if (sb->i2o_version > I2OVER15) {
695 printk(KERN_ERR "%s: Not running version 1.5 of the I2O "
696 "Specification.\n", c->name);
697 return -ENODEV;
698 }
699
700 switch (sb->iop_state) {
701 case ADAPTER_STATE_FAULTED:
702 printk(KERN_CRIT "%s: hardware fault\n", c->name);
703 return -ENODEV;
704
705 case ADAPTER_STATE_READY:
706 case ADAPTER_STATE_OPERATIONAL:
707 case ADAPTER_STATE_HOLD:
708 case ADAPTER_STATE_FAILED:
709 pr_debug("%s: already running, trying to reset...\n", c->name);
710 if (i2o_iop_reset(c))
711 return -ENODEV;
712 }
713
714 rc = i2o_iop_init_outbound_queue(c);
715 if (rc)
716 return rc;
717
718 if (c->promise) {
719 if ((rc = i2o_iop_send_nop(c)))
720 return rc;
721
722 if ((rc = i2o_status_get(c)))
723 return rc;
724
725 if (i960)
726 pci_write_config_word(i960, 0x42, 0x3FF);
727 }
728
729 /* In HOLD state */
730
731 rc = i2o_hrt_get(c);
732
733 return rc;
734};
735
736/**
737 * i2o_iop_systab_set - Set the I2O System Table of the specified IOP
738 * @c: I2O controller to which the system table should be sent
739 *
740 * Before the systab can be set, i2o_systab_build() must be called.
741 *
742 * Returns 0 on success or negative error code on failure.
743 */
744static int i2o_iop_systab_set(struct i2o_controller *c)
745{
746 struct i2o_message __iomem *msg;
747 u32 m;
748 i2o_status_block *sb = c->status_block.virt;
749 struct device *dev = &c->pdev->dev;
750 struct resource *root;
751 int rc;
752
753 if (sb->current_mem_size < sb->desired_mem_size) {
754 struct resource *res = &c->mem_resource;
755 res->name = c->pdev->bus->name;
756 res->flags = IORESOURCE_MEM;
757 res->start = 0;
758 res->end = 0;
759 printk(KERN_INFO "%s: requires private memory resources.\n",
760 c->name);
761 root = pci_find_parent_resource(c->pdev, res);
762 if (root == NULL)
763 printk(KERN_WARNING "%s: Can't find parent resource!\n",
764 c->name);
765 if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
766 NULL, NULL) >= 0) {
767 c->mem_alloc = 1;
768 sb->current_mem_size = 1 + res->end - res->start;
769 sb->current_mem_base = res->start;
770 printk(KERN_INFO "%s: allocated %ld bytes of PCI memory"
771 " at 0x%08lX.\n", c->name,
772 1 + res->end - res->start, res->start);
773 }
774 }
775
776 if (sb->current_io_size < sb->desired_io_size) {
777 struct resource *res = &c->io_resource;
778 res->name = c->pdev->bus->name;
779 res->flags = IORESOURCE_IO;
780 res->start = 0;
781 res->end = 0;
782 printk(KERN_INFO "%s: requires private I/O resources.\n",
783 c->name);
784 root = pci_find_parent_resource(c->pdev, res);
785 if (root == NULL)
786 printk(KERN_WARNING "%s: Can't find parent resource!\n",
787 c->name);
788 if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
789 NULL, NULL) >= 0) {
790 c->io_alloc = 1;
791 sb->current_io_size = 1 + res->end - res->start;
792 sb->current_io_base = res->start;
793 printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at"
794 " 0x%08lX.\n", c->name,
795 1 + res->end - res->start, res->start);
796 }
797 }
798
799 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
800 if (m == I2O_QUEUE_EMPTY)
801 return -ETIMEDOUT;
802
803 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
804 PCI_DMA_TODEVICE);
805 if (!i2o_systab.phys) {
806 i2o_msg_nop(c, m);
807 return -ENOMEM;
808 }
809
810 writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]);
811 writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID,
812 &msg->u.head[1]);
813
814 /*
815 * Provide three SGL-elements:
816 * System table (SysTab), Private memory space declaration and
817 * Private i/o space declaration
818 *
819 * FIXME: is this still true?
820 * Nasty one here. We can't use dma_alloc_coherent to send the
821 * same table to everyone. We have to go remap it for them all
822 */
823
824 writel(c->unit + 2, &msg->body[0]);
825 writel(0, &msg->body[1]);
826 writel(0x54000000 | i2o_systab.len, &msg->body[2]);
827 writel(i2o_systab.phys, &msg->body[3]);
828 writel(0x54000000 | sb->current_mem_size, &msg->body[4]);
829 writel(sb->current_mem_base, &msg->body[5]);
830 writel(0xd4000000 | sb->current_io_size, &msg->body[6]);
831 writel(sb->current_io_base, &msg->body[7]);
832
833 rc = i2o_msg_post_wait(c, m, 120);
834
835 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
836 PCI_DMA_TODEVICE);
837
838 if (rc < 0)
839 printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n",
840 c->name, -rc);
841 else
842 pr_debug("%s: SysTab set.\n", c->name);
843
844 i2o_status_get(c); // Entered READY state
845
846 return rc;
847}
848
849/**
850 * i2o_iop_online - Bring a controller online into OPERATIONAL state.
851 * @c: I2O controller
852 *
853 * Send the system table and enable the I2O controller.
854 *
855 * Returns 0 on success or negative error code on failure.
856 */
857static int i2o_iop_online(struct i2o_controller *c)
858{
859 int rc;
860
861 rc = i2o_iop_systab_set(c);
862 if (rc)
863 return rc;
864
865 /* In READY state */
866 pr_debug("%s: Attempting to enable...\n", c->name);
867 rc = i2o_iop_enable(c);
868 if (rc)
869 return rc;
870
871 return 0;
872};
873
874/**
875 * i2o_iop_remove - Remove the I2O controller from the I2O core
876 * @c: I2O controller
877 *
878 * Remove the I2O controller from the I2O core. If devices are attached to
879 * the controller remove these also and finally reset the controller.
880 */
881void i2o_iop_remove(struct i2o_controller *c)
882{
883 struct i2o_device *dev, *tmp;
884
885 pr_debug("%s: deleting controller\n", c->name);
886
887 i2o_driver_notify_controller_remove_all(c);
888
889 list_del(&c->list);
890
891 list_for_each_entry_safe(dev, tmp, &c->devices, list)
892 i2o_device_remove(dev);
893
894 /* Ask the IOP to switch to RESET state */
895 i2o_iop_reset(c);
896}
897
898/**
899 * i2o_systab_build - Build system table
900 *
901 * The system table contains information about all the IOPs in the system
902 * (duh) and is used by the Executives on the IOPs to establish peer2peer
903 * connections. We're not supporting peer2peer at the moment, but this
904 * will be needed down the road for things like lan2lan forwarding.
905 *
906 * Returns 0 on success or negative error code on failure.
907 */
908static int i2o_systab_build(void)
909{
910 struct i2o_controller *c, *tmp;
911 int num_controllers = 0;
912 u32 change_ind = 0;
913 int count = 0;
914 struct i2o_sys_tbl *systab = i2o_systab.virt;
915
916 list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
917 num_controllers++;
918
919 if (systab) {
920 change_ind = systab->change_ind;
921 kfree(i2o_systab.virt);
922 }
923
924 /* Header + IOPs */
925 i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
926 sizeof(struct i2o_sys_tbl_entry);
927
928 systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL);
929 if (!systab) {
930 printk(KERN_ERR "i2o: unable to allocate memory for System "
931 "Table\n");
932 return -ENOMEM;
933 }
934 memset(systab, 0, i2o_systab.len);
935
936 systab->version = I2OVERSION;
937 systab->change_ind = change_ind + 1;
938
939 list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
940 i2o_status_block *sb;
941
942 if (count >= num_controllers) {
943 printk(KERN_ERR "i2o: controller added while building "
944 "system table\n");
945 break;
946 }
947
948 sb = c->status_block.virt;
949
950 /*
951 * Get updated IOP state so we have the latest information
952 *
953 * We should delete the controller at this point if it
954 * doesn't respond since if it's not on the system table
955 * it is technically not part of the I2O subsystem...
956 */
957 if (unlikely(i2o_status_get(c))) {
958 printk(KERN_ERR "%s: Deleting b/c could not get status"
959 " while attempting to build system table\n",
960 c->name);
961 i2o_iop_remove(c);
962 continue; // try the next one
963 }
964
965 systab->iops[count].org_id = sb->org_id;
966 systab->iops[count].iop_id = c->unit + 2;
967 systab->iops[count].seg_num = 0;
968 systab->iops[count].i2o_version = sb->i2o_version;
969 systab->iops[count].iop_state = sb->iop_state;
970 systab->iops[count].msg_type = sb->msg_type;
971 systab->iops[count].frame_size = sb->inbound_frame_size;
972 systab->iops[count].last_changed = change_ind;
973 systab->iops[count].iop_capabilities = sb->iop_capabilities;
974 systab->iops[count].inbound_low = i2o_ptr_low(c->post_port);
975 systab->iops[count].inbound_high = i2o_ptr_high(c->post_port);
976
977 count++;
978 }
979
980 systab->num_entries = count;
981
982 return 0;
983};
984
985/**
986 * i2o_parse_hrt - Parse the hardware resource table.
987 * @c: I2O controller
988 *
989 * We don't do anything with it except dumping it (in debug mode).
990 *
991 * Returns 0.
992 */
993static int i2o_parse_hrt(struct i2o_controller *c)
994{
995 i2o_dump_hrt(c);
996 return 0;
997};
998
999/**
1000 * i2o_status_get - Get the status block from the I2O controller
1001 * @c: I2O controller
1002 *
1003 * Issue a status query on the controller. This updates the attached
1004 * status block. The status block could then be accessed through
1005 * c->status_block.
1006 *
1007 * Returns 0 on success or negative error code on failure.
1008 */
1009int i2o_status_get(struct i2o_controller *c)
1010{
1011 struct i2o_message __iomem *msg;
1012 u32 m;
1013 u8 *status_block;
1014 unsigned long timeout;
1015
1016 status_block = (u8 *) c->status_block.virt;
1017 memset(status_block, 0, sizeof(i2o_status_block));
1018
1019 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
1020 if (m == I2O_QUEUE_EMPTY)
1021 return -ETIMEDOUT;
1022
1023 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
1024 writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
1025 &msg->u.head[1]);
1026 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
1027 writel(0, &msg->u.s.tcntxt); // FIXME: use reasonable transaction context
1028 writel(0, &msg->body[0]);
1029 writel(0, &msg->body[1]);
1030 writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]);
1031 writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]);
1032 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */
1033
1034 i2o_msg_post(c, m);
1035
1036 /* Wait for a reply */
1037 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
1038 while (status_block[87] != 0xFF) {
1039 if (time_after(jiffies, timeout)) {
1040 printk(KERN_ERR "%s: Get status timeout.\n", c->name);
1041 return -ETIMEDOUT;
1042 }
1043
1044 set_current_state(TASK_UNINTERRUPTIBLE);
1045 schedule_timeout(1);
1046
1047 rmb();
1048 }
1049
1050#ifdef DEBUG
1051 i2o_debug_state(c);
1052#endif
1053
1054 return 0;
1055}
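/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * how a caller might refresh and inspect the status block through
 * i2o_status_get(), mirroring its use in i2o_systab_build() above.  The
 * helper name is hypothetical; the fields read (iop_state, i2o_version,
 * inbound_frame_size) are the ones the system table code already uses.
 */
#if 0
static int example_check_iop_state(struct i2o_controller *c)
{
	int rc;
	i2o_status_block *sb;

	rc = i2o_status_get(c);		/* refreshes c->status_block */
	if (rc)
		return rc;		/* e.g. -ETIMEDOUT */

	sb = c->status_block.virt;
	pr_debug("%s: state %d, version %d, frame size %d\n",
		 c->name, sb->iop_state, sb->i2o_version,
		 sb->inbound_frame_size);
	return 0;
}
#endif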
1056
1057/**
1058 * i2o_hrt_get - Get the Hardware Resource Table from the I2O controller
1059 * @c: I2O controller from which the HRT should be fetched
1060 *
1061 * The HRT contains information about possible hidden devices but is
1062 * mostly useless to us.
1063 *
1064 * Returns 0 on success or negative error code on failure.
1065 */
1066static int i2o_hrt_get(struct i2o_controller *c)
1067{
1068 int rc;
1069 int i;
1070 i2o_hrt *hrt = c->hrt.virt;
1071 u32 size = sizeof(i2o_hrt);
1072 struct device *dev = &c->pdev->dev;
1073
1074 for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
1075 struct i2o_message __iomem *msg;
1076 u32 m;
1077
1078 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
1079 if (m == I2O_QUEUE_EMPTY)
1080 return -ETIMEDOUT;
1081
1082 writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]);
1083 writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
1084 &msg->u.head[1]);
1085 writel(0xd0000000 | c->hrt.len, &msg->body[0]);
1086 writel(c->hrt.phys, &msg->body[1]);
1087
1088 rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt);
1089
1090 if (rc < 0) {
1091 printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
1092 c->name, -rc);
1093 return rc;
1094 }
1095
1096 size = hrt->num_entries * hrt->entry_len << 2;
1097 if (size > c->hrt.len) {
1098 if (i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL))
1099 return -ENOMEM;
1100 else
1101 hrt = c->hrt.virt;
1102 } else
1103 return i2o_parse_hrt(c);
1104 }
1105
1106 printk(KERN_ERR "%s: Unable to get HRT after %d tries, giving up\n",
1107 c->name, I2O_HRT_GET_TRIES);
1108
1109 return -EBUSY;
1110}
1111
1112/**
1113 * i2o_iop_alloc - Allocate and initialize an i2o_controller struct
1114 *
1115 * Allocate the necessary memory for an i2o_controller struct and
1116 * initialize the lists.
1117 *
1118 * Returns a pointer to the I2O controller or an ERR_PTR() encoded
1119 * negative error code on failure.
1120 */
1121struct i2o_controller *i2o_iop_alloc(void)
1122{
1123 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
1124 struct i2o_controller *c;
1125
1126 c = kmalloc(sizeof(*c), GFP_KERNEL);
1127 if (!c) {
1128		printk(KERN_ERR "i2o: Insufficient memory to allocate an I2O "
1129 "controller.\n");
1130 return ERR_PTR(-ENOMEM);
1131 }
1132 memset(c, 0, sizeof(*c));
1133
1134 INIT_LIST_HEAD(&c->devices);
1135 spin_lock_init(&c->lock);
1136 init_MUTEX(&c->lct_lock);
1137 c->unit = unit++;
1138 sprintf(c->name, "iop%d", c->unit);
1139
1140#if BITS_PER_LONG == 64
1141 spin_lock_init(&c->context_list_lock);
1142 atomic_set(&c->context_list_counter, 0);
1143 INIT_LIST_HEAD(&c->context_list);
1144#endif
1145
1146 return c;
1147};
1148
1149/**
1150 * i2o_iop_free - Free the i2o_controller struct
1151 * @c: I2O controller to free
1152 */
1153void i2o_iop_free(struct i2o_controller *c)
1154{
1155 kfree(c);
1156};
1157
1158/**
1159 * i2o_iop_add - Initialize the I2O controller and add it to the I2O core
1160 * @c: controller
1161 *
1162 * Initialize the I2O controller and if no error occurs add it to the I2O
1163 * core.
1164 *
1165 * Returns 0 on success or negative error code on failure.
1166 */
1167int i2o_iop_add(struct i2o_controller *c)
1168{
1169 int rc;
1170
1171 printk(KERN_INFO "%s: Activating I2O controller...\n", c->name);
1172 printk(KERN_INFO "%s: This may take a few minutes if there are many "
1173 "devices\n", c->name);
1174
1175 if ((rc = i2o_iop_activate(c))) {
1176 printk(KERN_ERR "%s: could not activate controller\n",
1177 c->name);
1178 i2o_iop_reset(c);
1179 return rc;
1180 }
1181
1182 pr_debug("%s: building sys table...\n", c->name);
1183
1184 if ((rc = i2o_systab_build())) {
1185 i2o_iop_reset(c);
1186 return rc;
1187 }
1188
1189 pr_debug("%s: online controller...\n", c->name);
1190
1191 if ((rc = i2o_iop_online(c))) {
1192 i2o_iop_reset(c);
1193 return rc;
1194 }
1195
1196 pr_debug("%s: getting LCT...\n", c->name);
1197
1198 if ((rc = i2o_exec_lct_get(c))) {
1199 i2o_iop_reset(c);
1200 return rc;
1201 }
1202
1203 list_add(&c->list, &i2o_controllers);
1204
1205 i2o_driver_notify_controller_add_all(c);
1206
1207 printk(KERN_INFO "%s: Controller added\n", c->name);
1208
1209 return 0;
1210};
1211
1212/**
1213 * i2o_event_register - Turn on/off event notification for an I2O device
1214 * @dev: I2O device which should receive the event registration request
1215 * @drv: driver which wants to be notified
1216 * @tcntxt: transaction context to use with this notifier
1217 * @evt_mask: mask of events
1218 *
1219 * Creates and posts an event registration message to the task. No reply
1220 * is waited for, or expected. If you do not want further notifications,
1221 * call i2o_event_register again with an evt_mask of 0.
1222 *
1223 * Returns 0 on success or -ETIMEDOUT if no message could be fetched for
1224 * sending the request.
1225 */
1226int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
1227 int tcntxt, u32 evt_mask)
1228{
1229 struct i2o_controller *c = dev->iop;
1230 struct i2o_message __iomem *msg;
1231 u32 m;
1232
1233 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
1234 if (m == I2O_QUEUE_EMPTY)
1235 return -ETIMEDOUT;
1236
1237 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
1238 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data.
1239 tid, &msg->u.head[1]);
1240 writel(drv->context, &msg->u.s.icntxt);
1241 writel(tcntxt, &msg->u.s.tcntxt);
1242 writel(evt_mask, &msg->body[0]);
1243
1244 i2o_msg_post(c, m);
1245
1246 return 0;
1247};
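/*
 * Illustrative usage sketch (editorial addition): an OSM switching event
 * notification on and later off again, as described in the kernel-doc
 * above.  The helper names and the EXAMPLE_EVT_TCNTXT value are
 * placeholders chosen here; the device and driver pointers are assumed
 * to come from the OSM's own probe path.
 */
#if 0
#define EXAMPLE_EVT_TCNTXT	0x1234	/* caller-chosen transaction context */

static int example_events_on(struct i2o_device *dev, struct i2o_driver *drv)
{
	/* request all events; returns -ETIMEDOUT if no message frame */
	return i2o_event_register(dev, drv, EXAMPLE_EVT_TCNTXT, 0xffffffff);
}

static int example_events_off(struct i2o_device *dev, struct i2o_driver *drv)
{
	/* an evt_mask of 0 cancels further notifications */
	return i2o_event_register(dev, drv, EXAMPLE_EVT_TCNTXT, 0);
}
#endif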
1248
1249/**
1250 * i2o_iop_init - I2O main initialization function
1251 *
1252 * Initialize the I2O device and driver (OSM) layers, register the
1253 * Executive OSM and finally initialize the I2O PCI part.
1254 *
1255 * Returns 0 on success or negative error code on failure.
1256 */
1257static int __init i2o_iop_init(void)
1258{
1259 int rc = 0;
1260
1261 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
1262
1263 rc = i2o_device_init();
1264 if (rc)
1265 goto exit;
1266
1267 rc = i2o_driver_init();
1268 if (rc)
1269 goto device_exit;
1270
1271 rc = i2o_exec_init();
1272 if (rc)
1273 goto driver_exit;
1274
1275 rc = i2o_pci_init();
1276 if (rc < 0)
1277 goto exec_exit;
1278
1279 return 0;
1280
1281 exec_exit:
1282 i2o_exec_exit();
1283
1284 driver_exit:
1285 i2o_driver_exit();
1286
1287 device_exit:
1288 i2o_device_exit();
1289
1290 exit:
1291 return rc;
1292}
1293
1294/**
1295 * i2o_iop_exit - I2O main exit function
1296 *
1297 * Removes the I2O controllers from the PCI subsystem and shuts down the OSMs.
1298 */
1299static void __exit i2o_iop_exit(void)
1300{
1301 i2o_pci_exit();
1302 i2o_exec_exit();
1303 i2o_driver_exit();
1304 i2o_device_exit();
1305};
1306
1307module_init(i2o_iop_init);
1308module_exit(i2o_iop_exit);
1309
1310MODULE_AUTHOR("Red Hat Software");
1311MODULE_LICENSE("GPL");
1312MODULE_DESCRIPTION(OSM_DESCRIPTION);
1313MODULE_VERSION(OSM_VERSION);
1314
1315#if BITS_PER_LONG == 64
1316EXPORT_SYMBOL(i2o_cntxt_list_add);
1317EXPORT_SYMBOL(i2o_cntxt_list_get);
1318EXPORT_SYMBOL(i2o_cntxt_list_remove);
1319EXPORT_SYMBOL(i2o_cntxt_list_get_ptr);
1320#endif
1321EXPORT_SYMBOL(i2o_msg_get_wait);
1322EXPORT_SYMBOL(i2o_msg_nop);
1323EXPORT_SYMBOL(i2o_find_iop);
1324EXPORT_SYMBOL(i2o_iop_find_device);
1325EXPORT_SYMBOL(i2o_event_register);
1326EXPORT_SYMBOL(i2o_status_get);
1327EXPORT_SYMBOL(i2o_controllers);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
new file mode 100644
index 000000000000..e772752f056d
--- /dev/null
+++ b/drivers/message/i2o/pci.c
@@ -0,0 +1,528 @@
1/*
2 * PCI handling of I2O controller
3 *
4 * Copyright (C) 1999-2002 Red Hat Software
5 *
6 * Written by Alan Cox, Building Number Three Ltd
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * A lot of the I2O message side code from this is taken from the Red
14 * Creek RCPCI45 adapter driver by Red Creek Communications
15 *
16 * Fixes/additions:
17 * Philipp Rumpf
18 * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
19 * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20 * Deepak Saxena <deepak@plexity.net>
21 * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22 * Alan Cox <alan@redhat.com>:
23 * Ported to Linux 2.5.
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Minor fixes for 2.6.
26 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
27 * Support for sysfs included.
28 */
29
30#include <linux/pci.h>
31#include <linux/interrupt.h>
32#include <linux/i2o.h>
33
34#ifdef CONFIG_MTRR
35#include <asm/mtrr.h>
36#endif // CONFIG_MTRR
37
38/* Module internal functions from other sources */
39extern struct i2o_controller *i2o_iop_alloc(void);
40extern void i2o_iop_free(struct i2o_controller *);
41
42extern int i2o_iop_add(struct i2o_controller *);
43extern void i2o_iop_remove(struct i2o_controller *);
44
45extern int i2o_driver_dispatch(struct i2o_controller *, u32,
46 struct i2o_message *);
47
48/* PCI device id table for all I2O controllers */
49static struct pci_device_id __devinitdata i2o_pci_ids[] = {
50 {PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)},
51 {PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)},
52 {0}
53};
54
55/**
56 * i2o_dma_realloc - Realloc DMA memory
57 * @dev: struct device pointer to the PCI device of the I2O controller
58 * @addr: pointer to an i2o_dma struct DMA buffer
59 * @len: new length of memory
60 * @gfp_mask: GFP mask
61 *
62 * If there was something allocated in addr, free it first. If len > 0
63 * then try to allocate it and write the addresses back to the addr
64 * structure. If len == 0 set the virtual address to NULL.
65 *
66 * Returns 0 on success or negative error code on failure.
67 */
68int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len,
69 unsigned int gfp_mask)
70{
71 i2o_dma_free(dev, addr);
72
73 if (len)
74 return i2o_dma_alloc(dev, addr, len, gfp_mask);
75
76 return 0;
77};
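/*
 * Illustrative usage sketch (editorial addition): growing an i2o_dma
 * buffer with i2o_dma_realloc(), the same pattern i2o_hrt_get() in iop.c
 * uses when the controller reports a table larger than the current
 * allocation.  The helper name is hypothetical.
 */
#if 0
static int example_grow_hrt(struct i2o_controller *c, size_t needed)
{
	struct device *dev = &c->pdev->dev;

	if (needed <= c->hrt.len)
		return 0;		/* current buffer is large enough */

	/* frees the old buffer, then allocates "needed" bytes */
	if (i2o_dma_realloc(dev, &c->hrt, needed, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}
#endif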
78
79/**
80 * i2o_pci_free - Frees the DMA memory for the I2O controller
81 * @c: I2O controller to free
82 *
83 * Remove all allocated DMA memory and unmap memory IO regions. If MTRR
84 * is enabled, the MTRR regions are removed as well.
85 */
86static void i2o_pci_free(struct i2o_controller *c)
87{
88 struct device *dev;
89
90 dev = &c->pdev->dev;
91
92 i2o_dma_free(dev, &c->out_queue);
93 i2o_dma_free(dev, &c->status_block);
94 if (c->lct)
95 kfree(c->lct);
96 i2o_dma_free(dev, &c->dlct);
97 i2o_dma_free(dev, &c->hrt);
98 i2o_dma_free(dev, &c->status);
99
100#ifdef CONFIG_MTRR
101 if (c->mtrr_reg0 >= 0)
102 mtrr_del(c->mtrr_reg0, 0, 0);
103 if (c->mtrr_reg1 >= 0)
104 mtrr_del(c->mtrr_reg1, 0, 0);
105#endif
106
107 if (c->raptor && c->in_queue.virt)
108 iounmap(c->in_queue.virt);
109
110 if (c->base.virt)
111 iounmap(c->base.virt);
112}
113
114/**
115 * i2o_pci_alloc - Allocate DMA memory, map IO memory for I2O controller
116 * @c: I2O controller
117 *
118 * Allocate DMA memory for a PCI (or in theory AGP) I2O controller. All
119 * IO mappings are also done here. If MTRR is enabled, the MTRR memory
120 * regions are also set up here.
121 *
122 * Returns 0 on success or negative error code on failure.
123 */
124static int __devinit i2o_pci_alloc(struct i2o_controller *c)
125{
126 struct pci_dev *pdev = c->pdev;
127 struct device *dev = &pdev->dev;
128 int i;
129
130 for (i = 0; i < 6; i++) {
131 /* Skip I/O spaces */
132 if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
133 if (!c->base.phys) {
134 c->base.phys = pci_resource_start(pdev, i);
135 c->base.len = pci_resource_len(pdev, i);
136
137 /*
138 * If we know what card it is, set the size
139 * correctly. Code is taken from dpt_i2o.c
140 */
141 if (pdev->device == 0xa501) {
142 if (pdev->subsystem_device >= 0xc032 &&
143 pdev->subsystem_device <= 0xc03b) {
144 if (c->base.len > 0x400000)
145 c->base.len = 0x400000;
146 } else {
147 if (c->base.len > 0x100000)
148 c->base.len = 0x100000;
149 }
150 }
151 if (!c->raptor)
152 break;
153 } else {
154 c->in_queue.phys = pci_resource_start(pdev, i);
155 c->in_queue.len = pci_resource_len(pdev, i);
156 break;
157 }
158 }
159 }
160
161 if (i == 6) {
162 printk(KERN_ERR "%s: I2O controller has no memory regions"
163 " defined.\n", c->name);
164 i2o_pci_free(c);
165 return -EINVAL;
166 }
167
168 /* Map the I2O controller */
169 if (c->raptor) {
170 printk(KERN_INFO "%s: PCI I2O controller\n", c->name);
171 printk(KERN_INFO " BAR0 at 0x%08lX size=%ld\n",
172 (unsigned long)c->base.phys, (unsigned long)c->base.len);
173 printk(KERN_INFO " BAR1 at 0x%08lX size=%ld\n",
174 (unsigned long)c->in_queue.phys,
175 (unsigned long)c->in_queue.len);
176 } else
177 printk(KERN_INFO "%s: PCI I2O controller at %08lX size=%ld\n",
178 c->name, (unsigned long)c->base.phys,
179 (unsigned long)c->base.len);
180
181 c->base.virt = ioremap(c->base.phys, c->base.len);
182 if (!c->base.virt) {
183 printk(KERN_ERR "%s: Unable to map controller.\n", c->name);
184 return -ENOMEM;
185 }
186
187 if (c->raptor) {
188 c->in_queue.virt = ioremap(c->in_queue.phys, c->in_queue.len);
189 if (!c->in_queue.virt) {
190 printk(KERN_ERR "%s: Unable to map controller.\n",
191 c->name);
192 i2o_pci_free(c);
193 return -ENOMEM;
194 }
195 } else
196 c->in_queue = c->base;
197
198 c->irq_mask = c->base.virt + 0x34;
199 c->post_port = c->base.virt + 0x40;
200 c->reply_port = c->base.virt + 0x44;
201
202#ifdef CONFIG_MTRR
203 /* Enable Write Combining MTRR for IOP's memory region */
204 c->mtrr_reg0 = mtrr_add(c->in_queue.phys, c->in_queue.len,
205 MTRR_TYPE_WRCOMB, 1);
206 c->mtrr_reg1 = -1;
207
208 if (c->mtrr_reg0 < 0)
209 printk(KERN_WARNING "%s: could not enable write combining "
210 "MTRR\n", c->name);
211 else
212 printk(KERN_INFO "%s: using write combining MTRR\n", c->name);
213
214 /*
215 * If it is an INTEL i960 I/O processor then set the first 64K to
216 * Uncacheable since the region contains the messaging unit which
217 * shouldn't be cached.
218 */
219 if ((pdev->vendor == PCI_VENDOR_ID_INTEL ||
220 pdev->vendor == PCI_VENDOR_ID_DPT) && !c->raptor) {
221 printk(KERN_INFO "%s: MTRR workaround for Intel i960 processor"
222 "\n", c->name);
223 c->mtrr_reg1 = mtrr_add(c->base.phys, 0x10000,
224 MTRR_TYPE_UNCACHABLE, 1);
225
226 if (c->mtrr_reg1 < 0) {
227 printk(KERN_WARNING "%s: Error in setting "
228 "MTRR_TYPE_UNCACHABLE\n", c->name);
229 mtrr_del(c->mtrr_reg0, c->in_queue.phys,
230 c->in_queue.len);
231 c->mtrr_reg0 = -1;
232 }
233 }
234#endif
235
236 if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) {
237 i2o_pci_free(c);
238 return -ENOMEM;
239 }
240
241 if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt), GFP_KERNEL)) {
242 i2o_pci_free(c);
243 return -ENOMEM;
244 }
245
246 if (i2o_dma_alloc(dev, &c->dlct, 8192, GFP_KERNEL)) {
247 i2o_pci_free(c);
248 return -ENOMEM;
249 }
250
251 if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block),
252 GFP_KERNEL)) {
253 i2o_pci_free(c);
254 return -ENOMEM;
255 }
256
257 if (i2o_dma_alloc(dev, &c->out_queue, MSG_POOL_SIZE, GFP_KERNEL)) {
258 i2o_pci_free(c);
259 return -ENOMEM;
260 }
261
262 pci_set_drvdata(pdev, c);
263
264 return 0;
265}
266
267/**
268 * i2o_pci_interrupt - Interrupt handler for I2O controller
269 * @irq: interrupt line
270 * @dev_id: pointer to the I2O controller
271 * @r: pointer to registers
272 *
273 * Handle an interrupt from a PCI based I2O controller. This turns out
274 * to be rather simple. We keep the controller pointer in the cookie.
275 */
276static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
277{
278 struct i2o_controller *c = dev_id;
279 struct device *dev = &c->pdev->dev;
280 struct i2o_message *m;
281 u32 mv;
282
283 /*
284 * Old 960 steppings had a bug in the I2O unit that caused
285 * the queue to appear empty when it wasn't.
286 */
287 mv = I2O_REPLY_READ32(c);
288 if (mv == I2O_QUEUE_EMPTY) {
289 mv = I2O_REPLY_READ32(c);
290 if (unlikely(mv == I2O_QUEUE_EMPTY)) {
291 return IRQ_NONE;
292 } else
293 pr_debug("%s: 960 bug detected\n", c->name);
294 }
295
296 while (mv != I2O_QUEUE_EMPTY) {
297 /*
298 * Map the message from the page frame map to kernel virtual.
299		 * Because bus_to_virt is deprecated, we have to calculate the
300		 * location ourselves!
301 */
302 m = i2o_msg_out_to_virt(c, mv);
303
304 /*
305		 * Ensure this message is seen coherently but cacheably by
306 * the processor
307 */
308 dma_sync_single_for_cpu(dev, mv, MSG_FRAME_SIZE * 4,
309 PCI_DMA_FROMDEVICE);
310
311 /* dispatch it */
312 if (i2o_driver_dispatch(c, mv, m))
313 /* flush it if result != 0 */
314 i2o_flush_reply(c, mv);
315
316 /*
317 * That 960 bug again...
318 */
319 mv = I2O_REPLY_READ32(c);
320 if (mv == I2O_QUEUE_EMPTY)
321 mv = I2O_REPLY_READ32(c);
322 }
323 return IRQ_HANDLED;
324}
325
326/**
327 * i2o_pci_irq_enable - Allocate interrupt for I2O controller
328 *
329 * Allocate an interrupt for the I2O controller, and activate interrupts
330 * on the I2O controller.
331 *
332 * Returns 0 on success or negative error code on failure.
333 */
334static int i2o_pci_irq_enable(struct i2o_controller *c)
335{
336 struct pci_dev *pdev = c->pdev;
337 int rc;
338
339 I2O_IRQ_WRITE32(c, 0xffffffff);
340
341 if (pdev->irq) {
342 rc = request_irq(pdev->irq, i2o_pci_interrupt, SA_SHIRQ,
343 c->name, c);
344 if (rc < 0) {
345 printk(KERN_ERR "%s: unable to allocate interrupt %d."
346 "\n", c->name, pdev->irq);
347 return rc;
348 }
349 }
350
351 I2O_IRQ_WRITE32(c, 0x00000000);
352
353 printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq);
354
355 return 0;
356}
357
358/**
359 * i2o_pci_irq_disable - Free interrupt for I2O controller
360 * @c: I2O controller
361 *
362 * Disable interrupts in I2O controller and then free interrupt.
363 */
364static void i2o_pci_irq_disable(struct i2o_controller *c)
365{
366 I2O_IRQ_WRITE32(c, 0xffffffff);
367
368 if (c->pdev->irq > 0)
369 free_irq(c->pdev->irq, c);
370}
371
372/**
373 * i2o_pci_probe - Probe the PCI device for an I2O controller
374 * @dev: PCI device to test
375 * @id: id which matched with the PCI device id table
376 *
377 * Probe the PCI device for any device which is a member of the
378 * Intelligent I/O (I2O) class or an Adaptec Zero Channel Controller. We
379 * attempt to set up each such device and register it with the core.
380 *
381 * Returns 0 on success or negative error code on failure.
382 */
383static int __devinit i2o_pci_probe(struct pci_dev *pdev,
384 const struct pci_device_id *id)
385{
386 struct i2o_controller *c;
387 int rc;
388
389 printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
390
391 if ((pdev->class & 0xff) > 1) {
392 printk(KERN_WARNING "i2o: I2O controller found but does not "
393 "support I2O 1.5 (skipping).\n");
394 return -ENODEV;
395 }
396
397 if ((rc = pci_enable_device(pdev))) {
398 printk(KERN_WARNING "i2o: I2O controller found but could not be"
399 " enabled.\n");
400 return rc;
401 }
402
403 printk(KERN_INFO "i2o: I2O controller found on bus %d at %d.\n",
404 pdev->bus->number, pdev->devfn);
405
406 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
407 printk(KERN_WARNING "i2o: I2O controller on bus %d at %d: No "
408 "suitable DMA available!\n", pdev->bus->number,
409 pdev->devfn);
410 rc = -ENODEV;
411 goto disable;
412 }
413
414 pci_set_master(pdev);
415
416 c = i2o_iop_alloc();
417 if (IS_ERR(c)) {
418 printk(KERN_ERR "i2o: memory for I2O controller could not be "
419 "allocated\n");
420 rc = PTR_ERR(c);
421 goto disable;
422 }
423
424 c->pdev = pdev;
425 c->device = pdev->dev;
426
427 /* Cards that fall apart if you hit them with large I/O loads... */
428 if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
429 c->short_req = 1;
430 printk(KERN_INFO "%s: Symbios FC920 workarounds activated.\n",
431 c->name);
432 }
433
434 if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) {
435 c->promise = 1;
436 printk(KERN_INFO "%s: Promise workarounds activated.\n",
437 c->name);
438 }
439
440 /* Cards that go bananas if you quiesce them before you reset them. */
441 if (pdev->vendor == PCI_VENDOR_ID_DPT) {
442 c->no_quiesce = 1;
443 if (pdev->device == 0xa511)
444 c->raptor = 1;
445 }
446
447 if ((rc = i2o_pci_alloc(c))) {
448 printk(KERN_ERR "%s: DMA / IO allocation for I2O controller "
449		       "failed\n", c->name);
450 goto free_controller;
451 }
452
453 if (i2o_pci_irq_enable(c)) {
454 printk(KERN_ERR "%s: unable to enable interrupts for I2O "
455 "controller\n", c->name);
456 goto free_pci;
457 }
458
459 if ((rc = i2o_iop_add(c)))
460 goto uninstall;
461
462 return 0;
463
464 uninstall:
465 i2o_pci_irq_disable(c);
466
467 free_pci:
468 i2o_pci_free(c);
469
470 free_controller:
471 i2o_iop_free(c);
472
473 disable:
474 pci_disable_device(pdev);
475
476 return rc;
477}
478
479/**
480 * i2o_pci_remove - Removes an I2O controller from the system
481 * @pdev: PCI device of the I2O controller which should be removed
482 *
483 * Reset the I2O controller, disable interrupts and remove all allocated
484 * resources.
485 */
486static void __devexit i2o_pci_remove(struct pci_dev *pdev)
487{
488 struct i2o_controller *c;
489 c = pci_get_drvdata(pdev);
490
491 i2o_iop_remove(c);
492 i2o_pci_irq_disable(c);
493 i2o_pci_free(c);
494
495 printk(KERN_INFO "%s: Controller removed.\n", c->name);
496
497 i2o_iop_free(c);
498 pci_disable_device(pdev);
499};
500
501/* PCI driver for I2O controller */
502static struct pci_driver i2o_pci_driver = {
503 .name = "I2O controller",
504 .id_table = i2o_pci_ids,
505 .probe = i2o_pci_probe,
506 .remove = __devexit_p(i2o_pci_remove),
507};
508
509/**
510 * i2o_pci_init - registers I2O PCI driver in PCI subsystem
511 *
512 * Returns > 0 on success or negative error code on failure.
513 */
514int __init i2o_pci_init(void)
515{
516 return pci_register_driver(&i2o_pci_driver);
517};
518
519/**
520 * i2o_pci_exit - unregisters I2O PCI driver from PCI subsystem
521 */
522void __exit i2o_pci_exit(void)
523{
524 pci_unregister_driver(&i2o_pci_driver);
525};
526
527EXPORT_SYMBOL(i2o_dma_realloc);
528MODULE_DEVICE_TABLE(pci, i2o_pci_ids);