Diffstat (limited to 'drivers/scsi')
-rw-r--r--   drivers/scsi/Kconfig  |    7
-rw-r--r--   drivers/scsi/Makefile |    1
-rw-r--r--   drivers/scsi/stex.c   | 1316
3 files changed, 1324 insertions, 0 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7de5fdfdab67..c8c606589ea6 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1070,6 +1070,13 @@ config 53C700_LE_ON_BE
 	depends on SCSI_LASI700
 	default y
 
+config SCSI_STEX
+	tristate "Promise SuperTrak EX Series support"
+	depends on PCI && SCSI
+	---help---
+	  This driver supports Promise SuperTrak EX8350/8300/16350/16300
+	  Storage controllers.
+
 config SCSI_SYM53C8XX_2
 	tristate "SYM53C8XX Version 2 SCSI support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 83da70decdd1..fd9aeb1ba07f 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o
 obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o
 obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o
 obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
+obj-$(CONFIG_SCSI_STEX) += stex.o
 
 obj-$(CONFIG_ARM) += arm/
 
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
new file mode 100644
index 000000000000..fd093302bf1a
--- /dev/null
+++ b/drivers/scsi/stex.c
@@ -0,0 +1,1316 @@
1 | /* | ||
2 | * SuperTrak EX Series Storage Controller driver for Linux | ||
3 | * | ||
4 | * Copyright (C) 2005, 2006 Promise Technology Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * Written By: | ||
12 | * Ed Lin <promise_linux@promise.com> | ||
13 | * | ||
14 | * Version: 2.9.0.13 | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/init.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/time.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/blkdev.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/irq.h> | ||
32 | #include <asm/byteorder.h> | ||
33 | #include <scsi/scsi.h> | ||
34 | #include <scsi/scsi_device.h> | ||
35 | #include <scsi/scsi_cmnd.h> | ||
36 | #include <scsi/scsi_host.h> | ||
37 | |||
38 | #define DRV_NAME "stex" | ||
39 | #define ST_DRIVER_VERSION "2.9.0.13" | ||
40 | #define ST_VER_MAJOR 2 | ||
41 | #define ST_VER_MINOR 9 | ||
42 | #define ST_OEM 0 | ||
43 | #define ST_BUILD_VER 13 | ||
44 | |||
45 | enum { | ||
46 | /* MU register offset */ | ||
47 | IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */ | ||
48 | IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */ | ||
49 | OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */ | ||
50 | OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */ | ||
51 | IDBL = 0x20, /* MU_INBOUND_DOORBELL */ | ||
52 | IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */ | ||
53 | IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */ | ||
54 | ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */ | ||
55 | OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */ | ||
56 | OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */ | ||
57 | |||
58 | /* MU register value */ | ||
59 | MU_INBOUND_DOORBELL_HANDSHAKE = 1, | ||
60 | MU_INBOUND_DOORBELL_REQHEADCHANGED = 2, | ||
61 | MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4, | ||
62 | MU_INBOUND_DOORBELL_HMUSTOPPED = 8, | ||
63 | MU_INBOUND_DOORBELL_RESET = 16, | ||
64 | |||
65 | MU_OUTBOUND_DOORBELL_HANDSHAKE = 1, | ||
66 | MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2, | ||
67 | MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4, | ||
68 | MU_OUTBOUND_DOORBELL_BUSCHANGE = 8, | ||
69 | MU_OUTBOUND_DOORBELL_HASEVENT = 16, | ||
70 | |||
71 | /* MU status code */ | ||
72 | MU_STATE_STARTING = 1, | ||
73 | MU_STATE_FMU_READY_FOR_HANDSHAKE = 2, | ||
74 | MU_STATE_SEND_HANDSHAKE_FRAME = 3, | ||
75 | MU_STATE_STARTED = 4, | ||
76 | MU_STATE_RESETTING = 5, | ||
77 | |||
78 | MU_MAX_DELAY_TIME = 240000, | ||
79 | MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, | ||
80 | HMU_PARTNER_TYPE = 2, | ||
81 | |||
82 | /* firmware returned values */ | ||
83 | SRB_STATUS_SUCCESS = 0x01, | ||
84 | SRB_STATUS_ERROR = 0x04, | ||
85 | SRB_STATUS_BUSY = 0x05, | ||
86 | SRB_STATUS_INVALID_REQUEST = 0x06, | ||
87 | SRB_STATUS_SELECTION_TIMEOUT = 0x0A, | ||
88 | SRB_SEE_SENSE = 0x80, | ||
89 | |||
90 | /* task attribute */ | ||
91 | TASK_ATTRIBUTE_SIMPLE = 0x0, | ||
92 | TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, | ||
93 | TASK_ATTRIBUTE_ORDERED = 0x2, | ||
94 | TASK_ATTRIBUTE_ACA = 0x4, | ||
95 | |||
96 | /* request count, etc. */ | ||
97 | MU_MAX_REQUEST = 32, | ||
98 | TAG_BITMAP_LENGTH = MU_MAX_REQUEST, | ||
99 | |||
100 | /* one message slot is wasted; use MU_MAX_REQUEST+1 slots | ||
101 | to handle MU_MAX_REQUEST outstanding messages */ ||
102 | MU_REQ_COUNT = (MU_MAX_REQUEST + 1), | ||
103 | MU_STATUS_COUNT = (MU_MAX_REQUEST + 1), | ||
104 | |||
105 | STEX_CDB_LENGTH = MAX_COMMAND_SIZE, | ||
106 | REQ_VARIABLE_LEN = 1024, | ||
107 | STATUS_VAR_LEN = 128, | ||
108 | ST_CAN_QUEUE = MU_MAX_REQUEST, | ||
109 | ST_CMD_PER_LUN = MU_MAX_REQUEST, | ||
110 | ST_MAX_SG = 32, | ||
111 | |||
112 | /* sg flags */ | ||
113 | SG_CF_EOT = 0x80, /* end of table */ | ||
114 | SG_CF_64B = 0x40, /* 64 bit item */ | ||
115 | SG_CF_HOST = 0x20, /* sg in host memory */ | ||
116 | |||
117 | ST_MAX_ARRAY_SUPPORTED = 16, | ||
118 | ST_MAX_TARGET_NUM = (ST_MAX_ARRAY_SUPPORTED+1), | ||
119 | ST_MAX_LUN_PER_TARGET = 16, | ||
120 | |||
121 | st_shasta = 0, | ||
122 | st_vsc = 1, | ||
123 | |||
124 | PASSTHRU_REQ_TYPE = 0x00000001, | ||
125 | PASSTHRU_REQ_NO_WAKEUP = 0x00000100, | ||
126 | ST_INTERNAL_TIMEOUT = 30, | ||
127 | |||
128 | /* vendor specific commands of Promise */ | ||
129 | ARRAY_CMD = 0xe0, | ||
130 | CONTROLLER_CMD = 0xe1, | ||
131 | DEBUGGING_CMD = 0xe2, | ||
132 | PASSTHRU_CMD = 0xe3, | ||
133 | |||
134 | PASSTHRU_GET_ADAPTER = 0x05, | ||
135 | PASSTHRU_GET_DRVVER = 0x10, | ||
136 | CTLR_POWER_STATE_CHANGE = 0x0e, | ||
137 | CTLR_POWER_SAVING = 0x01, | ||
138 | |||
139 | PASSTHRU_SIGNATURE = 0x4e415041, | ||
140 | |||
141 | INQUIRY_EVPD = 0x01, | ||
142 | }; | ||
143 | |||
144 | struct st_sgitem { | ||
145 | u8 ctrl; /* SG_CF_xxx */ | ||
146 | u8 reserved[3]; | ||
147 | __le32 count; | ||
148 | __le32 addr; | ||
149 | __le32 addr_hi; | ||
150 | }; | ||
151 | |||
152 | struct st_sgtable { | ||
153 | __le16 sg_count; | ||
154 | __le16 max_sg_count; | ||
155 | __le32 sz_in_byte; | ||
156 | struct st_sgitem table[ST_MAX_SG]; | ||
157 | }; | ||
158 | |||
159 | struct handshake_frame { | ||
160 | __le32 rb_phy; /* request payload queue physical address */ | ||
161 | __le32 rb_phy_hi; | ||
162 | __le16 req_sz; /* size of each request payload */ | ||
163 | __le16 req_cnt; /* count of reqs the buffer can hold */ | ||
164 | __le16 status_sz; /* size of each status payload */ | ||
165 | __le16 status_cnt; /* count of status the buffer can hold */ | ||
166 | __le32 hosttime; /* seconds from Jan 1, 1970 (GMT) */ | ||
167 | __le32 hosttime_hi; | ||
168 | u8 partner_type; /* who sends this frame */ | ||
169 | u8 reserved0[7]; | ||
170 | __le32 partner_ver_major; | ||
171 | __le32 partner_ver_minor; | ||
172 | __le32 partner_ver_oem; | ||
173 | __le32 partner_ver_build; | ||
174 | u32 reserved1[4]; | ||
175 | }; | ||
176 | |||
177 | struct req_msg { | ||
178 | __le16 tag; | ||
179 | u8 lun; | ||
180 | u8 target; | ||
181 | u8 task_attr; | ||
182 | u8 task_manage; | ||
183 | u8 prd_entry; | ||
184 | u8 payload_sz; /* payload size in 4-byte */ | ||
185 | u8 cdb[STEX_CDB_LENGTH]; | ||
186 | u8 variable[REQ_VARIABLE_LEN]; | ||
187 | }; | ||
188 | |||
189 | struct status_msg { | ||
190 | __le16 tag; | ||
191 | u8 lun; | ||
192 | u8 target; | ||
193 | u8 srb_status; | ||
194 | u8 scsi_status; | ||
195 | u8 reserved; | ||
196 | u8 payload_sz; /* payload size in 4-byte */ | ||
197 | u8 variable[STATUS_VAR_LEN]; | ||
198 | }; | ||
199 | |||
200 | struct ver_info { | ||
201 | u32 major; | ||
202 | u32 minor; | ||
203 | u32 oem; | ||
204 | u32 build; | ||
205 | u32 reserved[2]; | ||
206 | }; | ||
207 | |||
208 | struct st_frame { | ||
209 | u32 base[6]; | ||
210 | u32 rom_addr; | ||
211 | |||
212 | struct ver_info drv_ver; | ||
213 | struct ver_info bios_ver; | ||
214 | |||
215 | u32 bus; | ||
216 | u32 slot; | ||
217 | u32 irq_level; | ||
218 | u32 irq_vec; | ||
219 | u32 id; | ||
220 | u32 subid; | ||
221 | |||
222 | u32 dimm_size; | ||
223 | u8 dimm_type; | ||
224 | u8 reserved[3]; | ||
225 | |||
226 | u32 channel; | ||
227 | u32 reserved1; | ||
228 | }; | ||
229 | |||
230 | struct st_drvver { | ||
231 | u32 major; | ||
232 | u32 minor; | ||
233 | u32 oem; | ||
234 | u32 build; | ||
235 | u32 signature[2]; | ||
236 | u8 console_id; | ||
237 | u8 host_no; | ||
238 | u8 reserved0[2]; | ||
239 | u32 reserved[3]; | ||
240 | }; | ||
241 | |||
242 | #define MU_REQ_BUFFER_SIZE (MU_REQ_COUNT * sizeof(struct req_msg)) | ||
243 | #define MU_STATUS_BUFFER_SIZE (MU_STATUS_COUNT * sizeof(struct status_msg)) | ||
244 | #define MU_BUFFER_SIZE (MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE) | ||
245 | #define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + sizeof(struct st_frame)) | ||
246 | |||
247 | struct st_ccb { | ||
248 | struct req_msg *req; | ||
249 | struct scsi_cmnd *cmd; | ||
250 | |||
251 | void *sense_buffer; | ||
252 | unsigned int sense_bufflen; | ||
253 | int sg_count; | ||
254 | |||
255 | u32 req_type; | ||
256 | u8 srb_status; | ||
257 | u8 scsi_status; | ||
258 | }; | ||
259 | |||
260 | struct st_hba { | ||
261 | void __iomem *mmio_base; /* iomapped PCI memory space */ | ||
262 | void *dma_mem; | ||
263 | dma_addr_t dma_handle; | ||
264 | |||
265 | struct Scsi_Host *host; | ||
266 | struct pci_dev *pdev; | ||
267 | |||
268 | u32 tag; | ||
269 | u32 req_head; | ||
270 | u32 req_tail; | ||
271 | u32 status_head; | ||
272 | u32 status_tail; | ||
273 | |||
274 | struct status_msg *status_buffer; | ||
275 | void *copy_buffer; /* temp buffer for driver-handled commands */ | ||
276 | struct st_ccb ccb[MU_MAX_REQUEST]; | ||
277 | struct st_ccb *wait_ccb; | ||
278 | wait_queue_head_t waitq; | ||
279 | |||
280 | unsigned int mu_status; | ||
281 | int out_req_cnt; | ||
282 | |||
283 | unsigned int cardtype; | ||
284 | }; | ||
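/*
 * req_head/req_tail and status_head/status_tail index two circular buffers
 * shared with the firmware.  Sizing them MU_MAX_REQUEST + 1 deep ("one
 * message wasted") lets head == tail unambiguously mean "empty" while still
 * allowing MU_MAX_REQUEST outstanding messages.
 */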
285 | |||
286 | static const char console_inq_page[] = | ||
287 | { | ||
288 | 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30, | ||
289 | 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */ | ||
290 | 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */ | ||
291 | 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */ | ||
292 | 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */ | ||
293 | 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */ | ||
294 | 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */ | ||
295 | 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20 | ||
296 | }; | ||
297 | |||
298 | MODULE_AUTHOR("Ed Lin"); | ||
299 | MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers"); | ||
300 | MODULE_LICENSE("GPL"); | ||
301 | MODULE_VERSION(ST_DRIVER_VERSION); | ||
302 | |||
303 | static void stex_gettime(__le32 *time) | ||
304 | { | ||
305 | struct timeval tv; | ||
306 | do_gettimeofday(&tv); | ||
307 | |||
308 | *time = cpu_to_le32(tv.tv_sec & 0xffffffff); | ||
309 | *(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16); | ||
310 | } | ||
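/*
 * The "(x >> 16) >> 16" idiom used above (and for DMA addresses below)
 * extracts the upper 32 bits without ever shifting a possibly 32-bit
 * quantity by 32, which would be undefined behaviour in C; for a 32-bit
 * value the result is simply 0.
 */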
311 | |||
312 | static u16 __stex_alloc_tag(unsigned long *bitmap) | ||
313 | { | ||
314 | int i; | ||
315 | i = find_first_zero_bit(bitmap, TAG_BITMAP_LENGTH); | ||
316 | if (i < TAG_BITMAP_LENGTH) | ||
317 | __set_bit(i, bitmap); | ||
318 | return (u16)i; | ||
319 | } | ||
320 | |||
321 | static u16 stex_alloc_tag(struct st_hba *hba, unsigned long *bitmap) | ||
322 | { | ||
323 | unsigned long flags; | ||
324 | u16 tag; | ||
325 | |||
326 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
327 | tag = __stex_alloc_tag(bitmap); | ||
328 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
329 | return tag; | ||
330 | } | ||
331 | |||
332 | static void __stex_free_tag(unsigned long *bitmap, u16 tag) | ||
333 | { | ||
334 | __clear_bit((int)tag, bitmap); | ||
335 | } | ||
336 | |||
337 | static void stex_free_tag(struct st_hba *hba, unsigned long *bitmap, u16 tag) | ||
338 | { | ||
339 | unsigned long flags; | ||
340 | |||
341 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
342 | __stex_free_tag(bitmap, tag); | ||
343 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
344 | } | ||
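/*
 * Outstanding tags are tracked in the 32-bit hba->tag bitmap, one bit per
 * request (MU_MAX_REQUEST == 32).  __stex_alloc_tag() returns
 * TAG_BITMAP_LENGTH when every bit is set, which callers treat as
 * "controller queue full".
 */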
345 | |||
346 | static struct status_msg *stex_get_status(struct st_hba *hba) | ||
347 | { | ||
348 | struct status_msg *status = | ||
349 | hba->status_buffer + hba->status_tail; | ||
350 | |||
351 | ++hba->status_tail; | ||
352 | hba->status_tail %= MU_STATUS_COUNT; | ||
353 | |||
354 | return status; | ||
355 | } | ||
356 | |||
357 | static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) | ||
358 | { | ||
359 | cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; | ||
360 | |||
361 | cmd->sense_buffer[0] = 0x70; /* fixed format, current */ | ||
362 | cmd->sense_buffer[2] = sk; | ||
363 | cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ | ||
364 | cmd->sense_buffer[12] = asc; | ||
365 | cmd->sense_buffer[13] = ascq; | ||
366 | } | ||
367 | |||
368 | static void stex_invalid_field(struct scsi_cmnd *cmd, | ||
369 | void (*done)(struct scsi_cmnd *)) | ||
370 | { | ||
371 | /* "Invalid field in cbd" */ | ||
372 | stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); | ||
373 | done(cmd); | ||
374 | } | ||
375 | |||
376 | static struct req_msg *stex_alloc_req(struct st_hba *hba) | ||
377 | { | ||
378 | struct req_msg *req = ((struct req_msg *)hba->dma_mem) + | ||
379 | hba->req_head; | ||
380 | |||
381 | ++hba->req_head; | ||
382 | hba->req_head %= MU_REQ_COUNT; | ||
383 | |||
384 | return req; | ||
385 | } | ||
386 | |||
387 | static int stex_map_sg(struct st_hba *hba, | ||
388 | struct req_msg *req, struct st_ccb *ccb) | ||
389 | { | ||
390 | struct pci_dev *pdev = hba->pdev; | ||
391 | struct scsi_cmnd *cmd; | ||
392 | dma_addr_t dma_handle; | ||
393 | struct scatterlist *src; | ||
394 | struct st_sgtable *dst; | ||
395 | int i; | ||
396 | |||
397 | cmd = ccb->cmd; | ||
398 | dst = (struct st_sgtable *)req->variable; | ||
399 | dst->max_sg_count = cpu_to_le16(ST_MAX_SG); | ||
400 | dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen); | ||
401 | |||
402 | if (cmd->use_sg) { | ||
403 | int n_elem; | ||
404 | |||
405 | src = (struct scatterlist *) cmd->request_buffer; | ||
406 | n_elem = pci_map_sg(pdev, src, | ||
407 | cmd->use_sg, cmd->sc_data_direction); | ||
408 | if (n_elem <= 0) | ||
409 | return -EIO; | ||
410 | |||
411 | ccb->sg_count = n_elem; | ||
412 | dst->sg_count = cpu_to_le16((u16)n_elem); | ||
413 | |||
414 | for (i = 0; i < n_elem; i++, src++) { | ||
415 | dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src)); | ||
416 | dst->table[i].addr = | ||
417 | cpu_to_le32(sg_dma_address(src) & 0xffffffff); | ||
418 | dst->table[i].addr_hi = | ||
419 | cpu_to_le32((sg_dma_address(src) >> 16) >> 16); | ||
420 | dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST; | ||
421 | } | ||
422 | dst->table[--i].ctrl |= SG_CF_EOT; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | dma_handle = pci_map_single(pdev, cmd->request_buffer, | ||
427 | cmd->request_bufflen, cmd->sc_data_direction); | ||
428 | cmd->SCp.dma_handle = dma_handle; | ||
429 | |||
430 | ccb->sg_count = 1; | ||
431 | dst->sg_count = cpu_to_le16(1); | ||
432 | dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff); | ||
433 | dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16); | ||
434 | dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen); | ||
435 | dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST; | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | static void stex_internal_copy(struct scsi_cmnd *cmd, | ||
441 | const void *src, size_t *count, int sg_count) | ||
442 | { | ||
443 | size_t lcount; | ||
444 | size_t len; | ||
445 | void *s, *d, *base = NULL; | ||
446 | if (*count > cmd->request_bufflen) | ||
447 | *count = cmd->request_bufflen; | ||
448 | lcount = *count; | ||
449 | while (lcount) { | ||
450 | len = lcount; | ||
451 | s = (void *)src; | ||
452 | if (cmd->use_sg) { | ||
453 | size_t offset = *count - lcount; | ||
454 | s += offset; | ||
455 | base = scsi_kmap_atomic_sg(cmd->request_buffer, | ||
456 | sg_count, &offset, &len); | ||
457 | if (base == NULL) { | ||
458 | *count -= lcount; | ||
459 | return; | ||
460 | } | ||
461 | d = base + offset; | ||
462 | } else | ||
463 | d = cmd->request_buffer; | ||
464 | |||
465 | memcpy(d, s, len); | ||
466 | |||
467 | lcount -= len; | ||
468 | if (cmd->use_sg) | ||
469 | scsi_kunmap_atomic_sg(base); | ||
470 | } | ||
471 | } | ||
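/*
 * stex_internal_copy() goes through scsi_kmap_atomic_sg() because the
 * command's scatter-gather pages may live in highmem and thus have no
 * permanent kernel mapping; each chunk is mapped, copied and unmapped in
 * turn.
 */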
472 | |||
473 | static int stex_direct_copy(struct scsi_cmnd *cmd, | ||
474 | const void *src, size_t count) | ||
475 | { | ||
476 | struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0]; | ||
477 | size_t cp_len = count; | ||
478 | int n_elem = 0; | ||
479 | |||
480 | if (cmd->use_sg) { | ||
481 | n_elem = pci_map_sg(hba->pdev, cmd->request_buffer, | ||
482 | cmd->use_sg, cmd->sc_data_direction); | ||
483 | if (n_elem <= 0) | ||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | stex_internal_copy(cmd, src, &cp_len, n_elem); | ||
488 | |||
489 | if (cmd->use_sg) | ||
490 | pci_unmap_sg(hba->pdev, cmd->request_buffer, | ||
491 | cmd->use_sg, cmd->sc_data_direction); | ||
492 | return cp_len == count; | ||
493 | } | ||
494 | |||
495 | static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) | ||
496 | { | ||
497 | struct st_frame *p; | ||
498 | size_t count = sizeof(struct st_frame); | ||
499 | |||
500 | p = hba->copy_buffer; | ||
501 | memset(p->base, 0, sizeof(u32)*6); | ||
502 | *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); | ||
503 | p->rom_addr = 0; | ||
504 | |||
505 | p->drv_ver.major = ST_VER_MAJOR; | ||
506 | p->drv_ver.minor = ST_VER_MINOR; | ||
507 | p->drv_ver.oem = ST_OEM; | ||
508 | p->drv_ver.build = ST_BUILD_VER; | ||
509 | |||
510 | p->bus = hba->pdev->bus->number; | ||
511 | p->slot = hba->pdev->devfn; | ||
512 | p->irq_level = 0; | ||
513 | p->irq_vec = hba->pdev->irq; | ||
514 | p->id = hba->pdev->vendor << 16 | hba->pdev->device; | ||
515 | p->subid = | ||
516 | hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; | ||
517 | |||
518 | stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count); | ||
519 | } | ||
520 | |||
521 | static void | ||
522 | stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) | ||
523 | { | ||
524 | req->tag = cpu_to_le16(tag); | ||
525 | req->task_attr = TASK_ATTRIBUTE_SIMPLE; | ||
526 | req->task_manage = 0; /* not supported yet */ | ||
527 | req->payload_sz = (u8)(sizeof(struct req_msg)/sizeof(u32)); | ||
528 | |||
529 | hba->ccb[tag].req = req; | ||
530 | hba->out_req_cnt++; | ||
531 | |||
532 | writel(hba->req_head, hba->mmio_base + IMR0); | ||
533 | writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); | ||
534 | readl(hba->mmio_base + IDBL); /* flush */ | ||
535 | } | ||
536 | |||
537 | static int | ||
538 | stex_slave_config(struct scsi_device *sdev) | ||
539 | { | ||
540 | sdev->use_10_for_rw = 1; | ||
541 | sdev->use_10_for_ms = 1; | ||
542 | sdev->timeout = 60 * HZ; | ||
543 | return 0; | ||
544 | } | ||
545 | |||
546 | static void | ||
547 | stex_slave_destroy(struct scsi_device *sdev) | ||
548 | { | ||
549 | struct st_hba *hba = (struct st_hba *) sdev->host->hostdata; | ||
550 | struct req_msg *req; | ||
551 | unsigned long flags; | ||
552 | unsigned long before; | ||
553 | u16 tag; | ||
554 | |||
555 | if (sdev->type != TYPE_DISK) | ||
556 | return; | ||
557 | |||
558 | before = jiffies; | ||
559 | while ((tag = stex_alloc_tag(hba, (unsigned long *)&hba->tag)) | ||
560 | == TAG_BITMAP_LENGTH) { | ||
561 | if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) | ||
562 | return; | ||
563 | msleep(10); | ||
564 | } | ||
565 | |||
566 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
567 | req = stex_alloc_req(hba); | ||
568 | memset(req->cdb, 0, STEX_CDB_LENGTH); | ||
569 | |||
570 | req->target = sdev->id; | ||
571 | req->lun = sdev->channel; /* firmware lun issue workaround */ ||
572 | req->cdb[0] = SYNCHRONIZE_CACHE; | ||
573 | |||
574 | hba->ccb[tag].cmd = NULL; | ||
575 | hba->ccb[tag].sg_count = 0; | ||
576 | hba->ccb[tag].sense_bufflen = 0; | ||
577 | hba->ccb[tag].sense_buffer = NULL; | ||
578 | hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE; | ||
579 | |||
580 | stex_send_cmd(hba, req, tag); | ||
581 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
582 | |||
583 | wait_event_timeout(hba->waitq, | ||
584 | !(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT * HZ); | ||
585 | if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) | ||
586 | return; | ||
587 | |||
588 | stex_free_tag(hba, (unsigned long *)&hba->tag, tag); | ||
589 | } | ||
590 | |||
591 | static int | ||
592 | stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | ||
593 | { | ||
594 | struct st_hba *hba; | ||
595 | struct Scsi_Host *host; | ||
596 | unsigned int id, lun; | ||
597 | struct req_msg *req; | ||
598 | u16 tag; | ||
599 | host = cmd->device->host; | ||
600 | id = cmd->device->id; | ||
601 | lun = cmd->device->channel; /* firmware lun issue workaround */ ||
602 | hba = (struct st_hba *) &host->hostdata[0]; | ||
603 | |||
604 | switch (cmd->cmnd[0]) { | ||
605 | case MODE_SENSE_10: | ||
606 | { | ||
607 | static char ms10_caching_page[12] = | ||
608 | { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 }; | ||
609 | unsigned char page; | ||
610 | page = cmd->cmnd[2] & 0x3f; | ||
611 | if (page == 0x8 || page == 0x3f) { | ||
612 | stex_direct_copy(cmd, ms10_caching_page, | ||
613 | sizeof(ms10_caching_page)); | ||
614 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; | ||
615 | done(cmd); | ||
616 | } else | ||
617 | stex_invalid_field(cmd, done); | ||
618 | return 0; | ||
619 | } | ||
620 | case INQUIRY: | ||
621 | if (id != ST_MAX_ARRAY_SUPPORTED) | ||
622 | break; | ||
623 | if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { | ||
624 | stex_direct_copy(cmd, console_inq_page, | ||
625 | sizeof(console_inq_page)); | ||
626 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; | ||
627 | done(cmd); | ||
628 | } else | ||
629 | stex_invalid_field(cmd, done); | ||
630 | return 0; | ||
631 | case PASSTHRU_CMD: | ||
632 | if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { | ||
633 | struct st_drvver ver; | ||
634 | ver.major = ST_VER_MAJOR; | ||
635 | ver.minor = ST_VER_MINOR; | ||
636 | ver.oem = ST_OEM; | ||
637 | ver.build = ST_BUILD_VER; | ||
638 | ver.signature[0] = PASSTHRU_SIGNATURE; | ||
639 | ver.console_id = ST_MAX_ARRAY_SUPPORTED; | ||
640 | ver.host_no = hba->host->host_no; | ||
641 | cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ? | ||
642 | DID_OK << 16 | COMMAND_COMPLETE << 8 : | ||
643 | DID_ERROR << 16 | COMMAND_COMPLETE << 8; | ||
644 | done(cmd); | ||
645 | return 0; | ||
646 | } | ||
647 | default: | ||
648 | break; | ||
649 | } | ||
650 | |||
651 | cmd->scsi_done = done; | ||
652 | |||
653 | if (unlikely((tag = __stex_alloc_tag((unsigned long *)&hba->tag)) | ||
654 | == TAG_BITMAP_LENGTH)) | ||
655 | return SCSI_MLQUEUE_HOST_BUSY; | ||
656 | |||
657 | req = stex_alloc_req(hba); | ||
658 | req->lun = lun; | ||
659 | req->target = id; | ||
660 | |||
661 | /* cdb */ | ||
662 | memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH); | ||
663 | |||
664 | hba->ccb[tag].cmd = cmd; | ||
665 | hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; | ||
666 | hba->ccb[tag].sense_buffer = cmd->sense_buffer; | ||
667 | hba->ccb[tag].req_type = 0; | ||
668 | |||
669 | if (cmd->sc_data_direction != DMA_NONE) | ||
670 | stex_map_sg(hba, req, &hba->ccb[tag]); | ||
671 | |||
672 | stex_send_cmd(hba, req, tag); | ||
673 | return 0; | ||
674 | } | ||
675 | |||
676 | static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd) | ||
677 | { | ||
678 | if (cmd->sc_data_direction != DMA_NONE) { | ||
679 | if (cmd->use_sg) | ||
680 | pci_unmap_sg(hba->pdev, cmd->request_buffer, | ||
681 | cmd->use_sg, cmd->sc_data_direction); | ||
682 | else | ||
683 | pci_unmap_single(hba->pdev, cmd->SCp.dma_handle, | ||
684 | cmd->request_bufflen, cmd->sc_data_direction); | ||
685 | } | ||
686 | } | ||
687 | |||
688 | static void stex_scsi_done(struct st_ccb *ccb) | ||
689 | { | ||
690 | struct scsi_cmnd *cmd = ccb->cmd; | ||
691 | int result; | ||
692 | |||
693 | if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) { | ||
694 | result = ccb->scsi_status; | ||
695 | switch (ccb->scsi_status) { | ||
696 | case SAM_STAT_GOOD: | ||
697 | result |= DID_OK << 16 | COMMAND_COMPLETE << 8; | ||
698 | break; | ||
699 | case SAM_STAT_CHECK_CONDITION: | ||
700 | result |= DRIVER_SENSE << 24; | ||
701 | break; | ||
702 | case SAM_STAT_BUSY: | ||
703 | result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8; | ||
704 | break; | ||
705 | default: | ||
706 | result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8; | ||
707 | break; | ||
708 | } | ||
709 | } | ||
710 | else if (ccb->srb_status & SRB_SEE_SENSE) | ||
711 | result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION; | ||
712 | else switch (ccb->srb_status) { | ||
713 | case SRB_STATUS_SELECTION_TIMEOUT: | ||
714 | result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; | ||
715 | break; | ||
716 | case SRB_STATUS_BUSY: | ||
717 | result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8; | ||
718 | break; | ||
719 | case SRB_STATUS_INVALID_REQUEST: | ||
720 | case SRB_STATUS_ERROR: | ||
721 | default: | ||
722 | result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; | ||
723 | break; | ||
724 | } | ||
725 | |||
726 | cmd->result = result; | ||
727 | cmd->scsi_done(cmd); | ||
728 | } | ||
729 | |||
730 | static void stex_copy_data(struct st_ccb *ccb, | ||
731 | struct status_msg *resp, unsigned int variable) | ||
732 | { | ||
733 | size_t count = variable; | ||
734 | if (resp->scsi_status != SAM_STAT_GOOD) { | ||
735 | if (ccb->sense_buffer != NULL) | ||
736 | memcpy(ccb->sense_buffer, resp->variable, | ||
737 | min(variable, ccb->sense_bufflen)); | ||
738 | return; | ||
739 | } | ||
740 | |||
741 | if (ccb->cmd == NULL) | ||
742 | return; | ||
743 | stex_internal_copy(ccb->cmd, resp->variable, &count, ccb->sg_count); | ||
744 | } | ||
745 | |||
746 | static void stex_mu_intr(struct st_hba *hba, u32 doorbell) | ||
747 | { | ||
748 | void __iomem *base = hba->mmio_base; | ||
749 | struct status_msg *resp; | ||
750 | struct st_ccb *ccb; | ||
751 | unsigned int size; | ||
752 | u16 tag; | ||
753 | |||
754 | if (!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)) | ||
755 | return; | ||
756 | |||
757 | /* status payloads */ | ||
758 | hba->status_head = readl(base + OMR1); | ||
759 | if (unlikely(hba->status_head >= MU_STATUS_COUNT)) { | ||
760 | printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n", | ||
761 | pci_name(hba->pdev)); | ||
762 | return; | ||
763 | } | ||
764 | |||
765 | if (unlikely(hba->mu_status != MU_STATE_STARTED || | ||
766 | hba->out_req_cnt <= 0)) { | ||
767 | hba->status_tail = hba->status_head; | ||
768 | goto update_status; | ||
769 | } | ||
770 | |||
771 | while (hba->status_tail != hba->status_head) { | ||
772 | resp = stex_get_status(hba); | ||
773 | tag = le16_to_cpu(resp->tag); | ||
774 | if (unlikely(tag >= TAG_BITMAP_LENGTH)) { | ||
775 | printk(KERN_WARNING DRV_NAME | ||
776 | "(%s): invalid tag\n", pci_name(hba->pdev)); | ||
777 | continue; | ||
778 | } | ||
779 | if (unlikely((hba->tag & (1 << tag)) == 0)) { | ||
780 | printk(KERN_WARNING DRV_NAME | ||
781 | "(%s): null tag\n", pci_name(hba->pdev)); | ||
782 | continue; | ||
783 | } | ||
784 | |||
785 | hba->out_req_cnt--; | ||
786 | ccb = &hba->ccb[tag]; | ||
787 | if (hba->wait_ccb == ccb) | ||
788 | hba->wait_ccb = NULL; | ||
789 | if (unlikely(ccb->req == NULL)) { | ||
790 | printk(KERN_WARNING DRV_NAME | ||
791 | "(%s): lagging req\n", pci_name(hba->pdev)); | ||
792 | __stex_free_tag((unsigned long *)&hba->tag, tag); | ||
793 | stex_unmap_sg(hba, ccb->cmd); /* ??? */ | ||
794 | continue; | ||
795 | } | ||
796 | |||
797 | size = resp->payload_sz * sizeof(u32); /* payload size */ | ||
798 | if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || | ||
799 | size > sizeof(*resp))) { | ||
800 | printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", | ||
801 | pci_name(hba->pdev)); | ||
802 | } else { | ||
803 | size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */ | ||
804 | if (size) | ||
805 | stex_copy_data(ccb, resp, size); | ||
806 | } | ||
807 | |||
808 | ccb->srb_status = resp->srb_status; | ||
809 | ccb->scsi_status = resp->scsi_status; | ||
810 | |||
811 | if (ccb->req_type & PASSTHRU_REQ_TYPE) { | ||
812 | if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) { | ||
813 | ccb->req_type = 0; | ||
814 | continue; | ||
815 | } | ||
816 | ccb->req_type = 0; | ||
817 | if (waitqueue_active(&hba->waitq)) | ||
818 | wake_up(&hba->waitq); | ||
819 | continue; | ||
820 | } | ||
821 | if (ccb->cmd->cmnd[0] == PASSTHRU_CMD && | ||
822 | ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER) | ||
823 | stex_controller_info(hba, ccb); | ||
824 | __stex_free_tag((unsigned long *)&hba->tag, tag); | ||
825 | stex_unmap_sg(hba, ccb->cmd); | ||
826 | stex_scsi_done(ccb); | ||
827 | } | ||
828 | |||
829 | update_status: | ||
830 | writel(hba->status_head, base + IMR1); | ||
831 | readl(base + IMR1); /* flush */ | ||
832 | } | ||
833 | |||
834 | static irqreturn_t stex_intr(int irq, void *__hba, struct pt_regs *regs) | ||
835 | { | ||
836 | struct st_hba *hba = __hba; | ||
837 | void __iomem *base = hba->mmio_base; | ||
838 | u32 data; | ||
839 | unsigned long flags; | ||
840 | int handled = 0; | ||
841 | |||
842 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
843 | |||
844 | data = readl(base + ODBL); | ||
845 | |||
846 | if (data && data != 0xffffffff) { | ||
847 | /* clear the interrupt */ | ||
848 | writel(data, base + ODBL); | ||
849 | readl(base + ODBL); /* flush */ | ||
850 | stex_mu_intr(hba, data); | ||
851 | handled = 1; | ||
852 | } | ||
853 | |||
854 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
855 | |||
856 | return IRQ_RETVAL(handled); | ||
857 | } | ||
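/*
 * A doorbell value of 0xffffffff is treated as "no interrupt": reading
 * all-ones from PCI MMIO space is the usual symptom of a surprise-removed
 * or non-responding device, so it must not be acknowledged or processed.
 */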
858 | |||
859 | static int stex_handshake(struct st_hba *hba) | ||
860 | { | ||
861 | void __iomem *base = hba->mmio_base; | ||
862 | struct handshake_frame *h; | ||
863 | dma_addr_t status_phys; | ||
864 | int i; | ||
865 | |||
866 | if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { | ||
867 | writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); | ||
868 | readl(base + IDBL); | ||
869 | for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE | ||
870 | && i < MU_MAX_DELAY_TIME; i++) { | ||
871 | rmb(); | ||
872 | msleep(1); | ||
873 | } | ||
874 | |||
875 | if (i == MU_MAX_DELAY_TIME) { | ||
876 | printk(KERN_ERR DRV_NAME | ||
877 | "(%s): no handshake signature\n", | ||
878 | pci_name(hba->pdev)); | ||
879 | return -1; | ||
880 | } | ||
881 | } | ||
882 | |||
883 | udelay(10); | ||
884 | |||
885 | h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE); | ||
886 | h->rb_phy = cpu_to_le32(hba->dma_handle); | ||
887 | h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16); | ||
888 | h->req_sz = cpu_to_le16(sizeof(struct req_msg)); | ||
889 | h->req_cnt = cpu_to_le16(MU_REQ_COUNT); | ||
890 | h->status_sz = cpu_to_le16(sizeof(struct status_msg)); | ||
891 | h->status_cnt = cpu_to_le16(MU_STATUS_COUNT); | ||
892 | stex_gettime(&h->hosttime); | ||
893 | h->partner_type = HMU_PARTNER_TYPE; | ||
894 | |||
895 | status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE; | ||
896 | writel(status_phys, base + IMR0); | ||
897 | readl(base + IMR0); | ||
898 | writel((status_phys >> 16) >> 16, base + IMR1); | ||
899 | readl(base + IMR1); | ||
900 | |||
901 | writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */ | ||
902 | readl(base + OMR0); | ||
903 | writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); | ||
904 | readl(base + IDBL); /* flush */ | ||
905 | |||
906 | udelay(10); | ||
907 | for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE | ||
908 | && i < MU_MAX_DELAY_TIME; i++) { | ||
909 | rmb(); | ||
910 | msleep(1); | ||
911 | } | ||
912 | |||
913 | if (i == MU_MAX_DELAY_TIME) { | ||
914 | printk(KERN_ERR DRV_NAME | ||
915 | "(%s): no signature after handshake frame\n", | ||
916 | pci_name(hba->pdev)); | ||
917 | return -1; | ||
918 | } | ||
919 | |||
920 | writel(0, base + IMR0); | ||
921 | readl(base + IMR0); | ||
922 | writel(0, base + OMR0); | ||
923 | readl(base + OMR0); | ||
924 | writel(0, base + IMR1); | ||
925 | readl(base + IMR1); | ||
926 | writel(0, base + OMR1); | ||
927 | readl(base + OMR1); /* flush */ | ||
928 | hba->mu_status = MU_STATE_STARTED; | ||
929 | return 0; | ||
930 | } | ||
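/*
 * Handshake sequence: wait for the firmware to post MU_HANDSHAKE_SIGNATURE
 * in OMR0, describe the host-resident request/status rings in a
 * handshake_frame, pass that frame's physical address via IMR0/IMR1 (the
 * high half is mirrored to OMR0 for older firmware), ring the inbound
 * doorbell, wait for the signature to reappear, then clear the message
 * registers and mark the message unit started.
 */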
931 | |||
932 | static int stex_abort(struct scsi_cmnd *cmd) | ||
933 | { | ||
934 | struct Scsi_Host *host = cmd->device->host; | ||
935 | struct st_hba *hba = (struct st_hba *)host->hostdata; | ||
936 | u16 tag; | ||
937 | void __iomem *base; | ||
938 | u32 data; | ||
939 | int result = SUCCESS; | ||
940 | unsigned long flags; | ||
941 | base = hba->mmio_base; | ||
942 | spin_lock_irqsave(host->host_lock, flags); | ||
943 | |||
944 | for (tag = 0; tag < MU_MAX_REQUEST; tag++) | ||
945 | if (hba->ccb[tag].cmd == cmd && (hba->tag & (1 << tag))) { | ||
946 | hba->wait_ccb = &(hba->ccb[tag]); | ||
947 | break; | ||
948 | } | ||
949 | if (tag >= MU_MAX_REQUEST) | ||
950 | goto out; | ||
951 | |||
952 | data = readl(base + ODBL); | ||
953 | if (data == 0 || data == 0xffffffff) | ||
954 | goto fail_out; | ||
955 | |||
956 | writel(data, base + ODBL); | ||
957 | readl(base + ODBL); /* flush */ | ||
958 | |||
959 | stex_mu_intr(hba, data); | ||
960 | |||
961 | if (hba->wait_ccb == NULL) { | ||
962 | printk(KERN_WARNING DRV_NAME | ||
963 | "(%s): lost interrupt\n", pci_name(hba->pdev)); | ||
964 | goto out; | ||
965 | } | ||
966 | |||
967 | fail_out: | ||
968 | hba->wait_ccb->req = NULL; /* nullify the req's future return */ | ||
969 | hba->wait_ccb = NULL; | ||
970 | result = FAILED; | ||
971 | out: | ||
972 | spin_unlock_irqrestore(host->host_lock, flags); | ||
973 | return result; | ||
974 | } | ||
975 | |||
976 | static void stex_hard_reset(struct st_hba *hba) | ||
977 | { | ||
978 | struct pci_bus *bus; | ||
979 | int i; | ||
980 | u16 pci_cmd; | ||
981 | u8 pci_bctl; | ||
982 | |||
983 | for (i = 0; i < 16; i++) | ||
984 | pci_read_config_dword(hba->pdev, i * 4, | ||
985 | &hba->pdev->saved_config_space[i]); | ||
986 | |||
987 | /* Reset secondary bus. Our controller (MU/ATU) is the only device on the | ||
988 | secondary bus. Consult the Intel 80331/3 developer's manual for details */ | ||
989 | bus = hba->pdev->bus; | ||
990 | pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl); | ||
991 | pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
992 | pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); | ||
993 | msleep(1); | ||
994 | pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
995 | pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); | ||
996 | |||
997 | for (i = 0; i < MU_MAX_DELAY_TIME; i++) { | ||
998 | pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); | ||
999 | if (pci_cmd & PCI_COMMAND_MASTER) | ||
1000 | break; | ||
1001 | msleep(1); | ||
1002 | } | ||
1003 | |||
1004 | ssleep(5); | ||
1005 | for (i = 0; i < 16; i++) | ||
1006 | pci_write_config_dword(hba->pdev, i * 4, | ||
1007 | hba->pdev->saved_config_space[i]); | ||
1008 | } | ||
1009 | |||
1010 | static int stex_reset(struct scsi_cmnd *cmd) | ||
1011 | { | ||
1012 | struct st_hba *hba; | ||
1013 | unsigned long flags; | ||
1014 | hba = (struct st_hba *) &cmd->device->host->hostdata[0]; | ||
1015 | |||
1016 | hba->mu_status = MU_STATE_RESETTING; | ||
1017 | |||
1018 | if (hba->cardtype == st_shasta) | ||
1019 | stex_hard_reset(hba); | ||
1020 | |||
1021 | if (stex_handshake(hba)) { | ||
1022 | printk(KERN_WARNING DRV_NAME | ||
1023 | "(%s): resetting: handshake failed\n", | ||
1024 | pci_name(hba->pdev)); | ||
1025 | return FAILED; | ||
1026 | } | ||
1027 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
1028 | hba->tag = 0; | ||
1029 | hba->req_head = 0; | ||
1030 | hba->req_tail = 0; | ||
1031 | hba->status_head = 0; | ||
1032 | hba->status_tail = 0; | ||
1033 | hba->out_req_cnt = 0; | ||
1034 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
1035 | |||
1036 | return SUCCESS; | ||
1037 | } | ||
1038 | |||
1039 | static int stex_biosparam(struct scsi_device *sdev, | ||
1040 | struct block_device *bdev, sector_t capacity, int geom[]) | ||
1041 | { | ||
1042 | int heads = 255, sectors = 63, cylinders; | ||
1043 | |||
1044 | if (capacity < 0x200000) { | ||
1045 | heads = 64; | ||
1046 | sectors = 32; | ||
1047 | } | ||
1048 | |||
1049 | cylinders = sector_div(capacity, heads * sectors); | ||
1050 | |||
1051 | geom[0] = heads; | ||
1052 | geom[1] = sectors; | ||
1053 | geom[2] = cylinders; | ||
1054 | |||
1055 | return 0; | ||
1056 | } | ||
1057 | |||
1058 | static struct scsi_host_template driver_template = { | ||
1059 | .module = THIS_MODULE, | ||
1060 | .name = DRV_NAME, | ||
1061 | .proc_name = DRV_NAME, | ||
1062 | .bios_param = stex_biosparam, | ||
1063 | .queuecommand = stex_queuecommand, | ||
1064 | .slave_configure = stex_slave_config, | ||
1065 | .slave_destroy = stex_slave_destroy, | ||
1066 | .eh_abort_handler = stex_abort, | ||
1067 | .eh_host_reset_handler = stex_reset, | ||
1068 | .can_queue = ST_CAN_QUEUE, | ||
1069 | .this_id = -1, | ||
1070 | .sg_tablesize = ST_MAX_SG, | ||
1071 | .cmd_per_lun = ST_CMD_PER_LUN, | ||
1072 | }; | ||
1073 | |||
1074 | static int stex_set_dma_mask(struct pci_dev * pdev) | ||
1075 | { | ||
1076 | int ret; | ||
1077 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) | ||
1078 | && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) | ||
1079 | return 0; | ||
1080 | ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
1081 | if (!ret) | ||
1082 | ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
1083 | return ret; | ||
1084 | } | ||
1085 | |||
1086 | static int __devinit | ||
1087 | stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
1088 | { | ||
1089 | struct st_hba *hba; | ||
1090 | struct Scsi_Host *host; | ||
1091 | int err; | ||
1092 | |||
1093 | err = pci_enable_device(pdev); | ||
1094 | if (err) | ||
1095 | return err; | ||
1096 | |||
1097 | pci_set_master(pdev); | ||
1098 | |||
1099 | host = scsi_host_alloc(&driver_template, sizeof(struct st_hba)); | ||
1100 | |||
1101 | if (!host) { | ||
1102 | printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n", | ||
1103 | pci_name(pdev)); | ||
1104 | err = -ENOMEM; | ||
1105 | goto out_disable; | ||
1106 | } | ||
1107 | |||
1108 | hba = (struct st_hba *)host->hostdata; | ||
1109 | memset(hba, 0, sizeof(struct st_hba)); | ||
1110 | |||
1111 | err = pci_request_regions(pdev, DRV_NAME); | ||
1112 | if (err < 0) { | ||
1113 | printk(KERN_ERR DRV_NAME "(%s): request regions failed\n", | ||
1114 | pci_name(pdev)); | ||
1115 | goto out_scsi_host_put; | ||
1116 | } | ||
1117 | |||
1118 | hba->mmio_base = ioremap(pci_resource_start(pdev, 0), | ||
1119 | pci_resource_len(pdev, 0)); | ||
1120 | if (!hba->mmio_base) { | ||
1121 | printk(KERN_ERR DRV_NAME "(%s): memory map failed\n", | ||
1122 | pci_name(pdev)); | ||
1123 | err = -ENOMEM; | ||
1124 | goto out_release_regions; | ||
1125 | } | ||
1126 | |||
1127 | err = stex_set_dma_mask(pdev); | ||
1128 | if (err) { | ||
1129 | printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n", | ||
1130 | pci_name(pdev)); | ||
1131 | goto out_iounmap; | ||
1132 | } | ||
1133 | |||
1134 | hba->dma_mem = dma_alloc_coherent(&pdev->dev, | ||
1135 | STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL); | ||
1136 | if (!hba->dma_mem) { | ||
1137 | err = -ENOMEM; | ||
1138 | printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", | ||
1139 | pci_name(pdev)); | ||
1140 | goto out_iounmap; | ||
1141 | } | ||
1142 | |||
1143 | hba->status_buffer = | ||
1144 | (struct status_msg *)(hba->dma_mem + MU_REQ_BUFFER_SIZE); | ||
1145 | hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE; | ||
1146 | hba->mu_status = MU_STATE_STARTING; | ||
1147 | |||
1148 | hba->cardtype = (unsigned int) id->driver_data; | ||
1149 | |||
1150 | /* firmware uses an id/lun pair for a logical drive, but lun would | ||
1151 | always be 0 if CONFIG_SCSI_MULTI_LUN is not configured, so we use | ||
1152 | the channel to map lun here */ | ||
1153 | host->max_channel = ST_MAX_LUN_PER_TARGET - 1; | ||
1154 | host->max_id = ST_MAX_TARGET_NUM; | ||
1155 | host->max_lun = 1; | ||
1156 | host->unique_id = host->host_no; | ||
1157 | host->max_cmd_len = STEX_CDB_LENGTH; | ||
1158 | |||
1159 | hba->host = host; | ||
1160 | hba->pdev = pdev; | ||
1161 | init_waitqueue_head(&hba->waitq); | ||
1162 | |||
1163 | err = request_irq(pdev->irq, stex_intr, IRQF_SHARED, DRV_NAME, hba); | ||
1164 | if (err) { | ||
1165 | printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", | ||
1166 | pci_name(pdev)); | ||
1167 | goto out_pci_free; | ||
1168 | } | ||
1169 | |||
1170 | err = stex_handshake(hba); | ||
1171 | if (err) | ||
1172 | goto out_free_irq; | ||
1173 | |||
1174 | pci_set_drvdata(pdev, hba); | ||
1175 | |||
1176 | err = scsi_add_host(host, &pdev->dev); | ||
1177 | if (err) { | ||
1178 | printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n", | ||
1179 | pci_name(pdev)); | ||
1180 | goto out_free_irq; | ||
1181 | } | ||
1182 | |||
1183 | scsi_scan_host(host); | ||
1184 | |||
1185 | return 0; | ||
1186 | |||
1187 | out_free_irq: | ||
1188 | free_irq(pdev->irq, hba); | ||
1189 | out_pci_free: | ||
1190 | dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE, | ||
1191 | hba->dma_mem, hba->dma_handle); | ||
1192 | out_iounmap: | ||
1193 | iounmap(hba->mmio_base); | ||
1194 | out_release_regions: | ||
1195 | pci_release_regions(pdev); | ||
1196 | out_scsi_host_put: | ||
1197 | scsi_host_put(host); | ||
1198 | out_disable: | ||
1199 | pci_disable_device(pdev); | ||
1200 | |||
1201 | return err; | ||
1202 | } | ||
1203 | |||
1204 | static void stex_hba_stop(struct st_hba *hba) | ||
1205 | { | ||
1206 | struct req_msg *req; | ||
1207 | unsigned long flags; | ||
1208 | unsigned long before; | ||
1209 | u16 tag; | ||
1210 | |||
1211 | before = jiffies; | ||
1212 | while ((tag = stex_alloc_tag(hba, (unsigned long *)&hba->tag)) | ||
1213 | == TAG_BITMAP_LENGTH) { | ||
1214 | if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) | ||
1215 | return; | ||
1216 | msleep(10); | ||
1217 | } | ||
1218 | |||
1219 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
1220 | req = stex_alloc_req(hba); | ||
1221 | memset(req->cdb, 0, STEX_CDB_LENGTH); | ||
1222 | |||
1223 | req->cdb[0] = CONTROLLER_CMD; | ||
1224 | req->cdb[1] = CTLR_POWER_STATE_CHANGE; | ||
1225 | req->cdb[2] = CTLR_POWER_SAVING; | ||
1226 | |||
1227 | hba->ccb[tag].cmd = NULL; | ||
1228 | hba->ccb[tag].sg_count = 0; | ||
1229 | hba->ccb[tag].sense_bufflen = 0; | ||
1230 | hba->ccb[tag].sense_buffer = NULL; | ||
1231 | hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE; | ||
1232 | |||
1233 | stex_send_cmd(hba, req, tag); | ||
1234 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
1235 | |||
1236 | wait_event_timeout(hba->waitq, | ||
1237 | !(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT * HZ); | ||
1238 | if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) | ||
1239 | return; | ||
1240 | |||
1241 | stex_free_tag(hba, (unsigned long *)&hba->tag, tag); | ||
1242 | } | ||
1243 | |||
1244 | static void stex_hba_free(struct st_hba *hba) | ||
1245 | { | ||
1246 | free_irq(hba->pdev->irq, hba); | ||
1247 | |||
1248 | iounmap(hba->mmio_base); | ||
1249 | |||
1250 | pci_release_regions(hba->pdev); | ||
1251 | |||
1252 | dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE, | ||
1253 | hba->dma_mem, hba->dma_handle); | ||
1254 | } | ||
1255 | |||
1256 | static void stex_remove(struct pci_dev *pdev) | ||
1257 | { | ||
1258 | struct st_hba *hba = pci_get_drvdata(pdev); | ||
1259 | |||
1260 | scsi_remove_host(hba->host); | ||
1261 | |||
1262 | pci_set_drvdata(pdev, NULL); | ||
1263 | |||
1264 | stex_hba_stop(hba); | ||
1265 | |||
1266 | stex_hba_free(hba); | ||
1267 | |||
1268 | scsi_host_put(hba->host); | ||
1269 | |||
1270 | pci_disable_device(pdev); | ||
1271 | } | ||
1272 | |||
1273 | static void stex_shutdown(struct pci_dev *pdev) | ||
1274 | { | ||
1275 | struct st_hba *hba = pci_get_drvdata(pdev); | ||
1276 | |||
1277 | stex_hba_stop(hba); | ||
1278 | } | ||
1279 | |||
1280 | static struct pci_device_id stex_pci_tbl[] = { | ||
1281 | { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, | ||
1282 | { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, | ||
1283 | { 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, | ||
1284 | { 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, | ||
1285 | { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, | ||
1286 | { 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, | ||
1287 | { 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, | ||
1288 | { 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, | ||
1289 | { } /* terminate list */ | ||
1290 | }; | ||
1291 | MODULE_DEVICE_TABLE(pci, stex_pci_tbl); | ||
1292 | |||
1293 | static struct pci_driver stex_pci_driver = { | ||
1294 | .name = DRV_NAME, | ||
1295 | .id_table = stex_pci_tbl, | ||
1296 | .probe = stex_probe, | ||
1297 | .remove = __devexit_p(stex_remove), | ||
1298 | .shutdown = stex_shutdown, | ||
1299 | }; | ||
1300 | |||
1301 | static int __init stex_init(void) | ||
1302 | { | ||
1303 | printk(KERN_INFO DRV_NAME | ||
1304 | ": Promise SuperTrak EX Driver version: %s\n", | ||
1305 | ST_DRIVER_VERSION); | ||
1306 | |||
1307 | return pci_register_driver(&stex_pci_driver); | ||
1308 | } | ||
1309 | |||
1310 | static void __exit stex_exit(void) | ||
1311 | { | ||
1312 | pci_unregister_driver(&stex_pci_driver); | ||
1313 | } | ||
1314 | |||
1315 | module_init(stex_init); | ||
1316 | module_exit(stex_exit); | ||
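
For reference, the PASSTHRU_GET_DRVVER path that stex_queuecommand() services above can be exercised from user space through the SG_IO ioctl. The sketch below is illustrative only: the /dev/sg0 node, the 12-byte CDB length and the local stex_drvver mirror of struct st_drvver are assumptions, not part of the patch.

/*
 * Minimal user-space sketch (assumption: a SuperTrak EX logical drive or
 * console device is visible as /dev/sg0): read the driver version via the
 * vendor pass-through CDB (opcode 0xe3 PASSTHRU_CMD, sub-op 0x10
 * PASSTHRU_GET_DRVVER) that stex_queuecommand() answers directly.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

struct stex_drvver {			/* mirrors struct st_drvver in stex.c */
	uint32_t major, minor, oem, build;
	uint32_t signature[2];
	uint8_t  console_id, host_no, reserved0[2];
	uint32_t reserved[3];
};

int main(void)
{
	unsigned char cdb[12] = { 0xe3, 0x10 };	/* PASSTHRU_CMD, PASSTHRU_GET_DRVVER */
	unsigned char sense[32];
	struct stex_drvver ver;
	struct sg_io_hdr io;
	int fd = open("/dev/sg0", O_RDWR);	/* hypothetical device node */

	if (fd < 0)
		return 1;

	memset(&io, 0, sizeof(io));
	memset(&ver, 0, sizeof(ver));
	io.interface_id    = 'S';
	io.cmd_len         = sizeof(cdb);
	io.cmdp            = cdb;
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxferp          = &ver;
	io.dxfer_len       = sizeof(ver);
	io.mx_sb_len       = sizeof(sense);
	io.sbp             = sense;
	io.timeout         = 30000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &io) == 0)
		printf("stex driver %u.%u.%u.%u on scsi host %u\n",
		       ver.major, ver.minor, ver.oem, ver.build, ver.host_no);

	close(fd);
	return 0;
}

Built as a standalone program and run with sufficient privileges, this should report the same 2.9.0.13 version that stex_init() logs at module load time.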