-rw-r--r-- | Documentation/scsi/hptiop.txt | 92 | ||||
-rw-r--r-- | MAINTAINERS | 6 | ||||
-rw-r--r-- | drivers/scsi/Kconfig | 10 | ||||
-rw-r--r-- | drivers/scsi/Makefile | 1 | ||||
-rw-r--r-- | drivers/scsi/hptiop.c | 1501 | ||||
-rw-r--r-- | drivers/scsi/hptiop.h | 465 |
6 files changed, 2075 insertions, 0 deletions
diff --git a/Documentation/scsi/hptiop.txt b/Documentation/scsi/hptiop.txt new file mode 100644 index 000000000000..d28a31247d4c --- /dev/null +++ b/Documentation/scsi/hptiop.txt | |||
@@ -0,0 +1,92 @@ | |||
1 | HIGHPOINT ROCKETRAID 3xxx RAID DRIVER (hptiop) | ||
2 | |||
3 | Controller Register Map | ||
4 | ------------------------- | ||
5 | |||
6 | The controller IOP is accessed via PCI BAR0. | ||
7 | |||
8 | BAR0 offset Register | ||
9 | 0x10 Inbound Message Register 0 | ||
10 | 0x14 Inbound Message Register 1 | ||
11 | 0x18 Outbound Message Register 0 | ||
12 | 0x1C Outbound Message Register 1 | ||
13 | 0x20 Inbound Doorbell Register | ||
14 | 0x24 Inbound Interrupt Status Register | ||
15 | 0x28 Inbound Interrupt Mask Register | ||
16 | 0x30 Outbound Interrupt Status Register | ||
17 | 0x34 Outbound Interrupt Mask Register | ||
18 | 0x40 Inbound Queue Port | ||
19 | 0x44 Outbound Queue Port | ||
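
A minimal access sketch (assumptions: pcidev is the controller's struct
pci_dev; BAR0 is mapped with ioremap() as in hptiop_map_pci_bar(); the
authoritative register layout is struct hpt_iopmu in hptiop.h):

    struct hpt_iopmu __iomem *iop;
    u32 slot, msg;

    iop = ioremap(pci_resource_start(pcidev, 0),
                  pci_resource_len(pcidev, 0));

    /* pop a free request slot from the Inbound Queue Port (0x40) */
    slot = readl(&iop->inbound_queue);

    /* read an outbound message (0x18), then acknowledge it (0x30) */
    msg = readl(&iop->outbound_msgaddr0);
    writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);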
20 | |||
21 | |||
22 | I/O Request Workflow | ||
23 | ---------------------- | ||
24 | |||
25 | All queued requests are handled via the inbound/outbound queue ports. | ||
26 | A request packet can be allocated in either IOP or host memory. | ||
27 | |||
28 | To send a request to the controller: | ||
29 | |||
30 | - Get a free request packet by reading the inbound queue port, or | ||
31 | allocate a free request in host DMA-coherent memory. | ||
32 | |||
33 | The value returned from the inbound queue port is an offset | ||
34 | relative to the IOP BAR0. | ||
35 | |||
36 | Requests allocated in host memory must be aligned on a 32-byte boundary. | ||
37 | |||
38 | - Fill the packet. | ||
39 | |||
40 | - Post the packet to the IOP by writing it to the inbound queue. For | ||
41 | requests allocated in IOP memory, write the offset to the inbound queue | ||
42 | port. For requests allocated in host memory, write | ||
43 | (0x80000000|(bus_addr>>5)) to the inbound queue port (sketched below). | ||
44 | |||
45 | - The IOP processes the request. When the request is completed, it | ||
46 | will be put into the outbound queue and an outbound interrupt will | ||
47 | be generated. | ||
48 | |||
49 | For requests allocated in IOP memory, the request offset is posted to | ||
50 | the outbound queue. | ||
51 | |||
52 | For requests allocated in host memory, (0x80000000|(bus_addr>>5)) | ||
53 | is posted to the outbound queue. If IOP_REQUEST_FLAG_OUTPUT_CONTEXT | ||
54 | flag is set in the request, the low 32-bit context value will be | ||
55 | posted instead. | ||
56 | |||
57 | - The host reads the outbound queue and completes the request. | ||
58 | |||
59 | For requests allocated in IOP memory, the host driver frees the request | ||
60 | by writing it back to the outbound queue. | ||
61 | |||
62 | Non-queued requests (reset, flush, etc.) can be sent via Inbound Message | ||
63 | Register 0. An outbound message with the same value indicates the | ||
64 | completion of an inbound message. | ||
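
The sketch referenced above, condensed from hptiop_queuecommand() in
hptiop.c (error handling and most field setup omitted; _req is a
driver-internal request slot whose req_shifted_phy field holds
bus_addr>>5, and IOPMU_QUEUE_ADDR_HOST_BIT is the 0x80000000 flag):

    struct hpt_iop_request_scsi_command *req = _req->req_virt;

    /* fill the packet; it lives in 32-byte aligned DMA-coherent memory */
    req->header.flags  = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
    req->header.type   = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
    req->header.result = cpu_to_le32(IOP_RESULT_PENDING);

    /* post 0x80000000|(bus_addr>>5) to the inbound queue port */
    writel(IOPMU_QUEUE_ADDR_HOST_BIT | _req->req_shifted_phy,
           &hba->iop->inbound_queue);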
65 | |||
66 | |||
67 | User-level Interface | ||
68 | --------------------- | ||
69 | |||
70 | The driver exposes the following sysfs attributes: | ||
71 | |||
72 | NAME R/W Description | ||
73 | driver-version R driver version string | ||
74 | firmware-version R firmware version string | ||
75 | |||
76 | The driver registers a char device, "hptiop", to communicate with the | ||
77 | HighPoint RAID management software. Its ioctl routine acts as a general | ||
78 | binary interface between the IOP firmware and the HighPoint RAID management | ||
79 | software. New management functions can be implemented in the application | ||
80 | or firmware without modifying the driver code. | ||
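
A minimal user-space sketch of one such call (assumptions: the device
node was created by hand with mknod from the dynamically assigned major,
struct hpt_ioctl_u and the HPT_IOCTL_* constants come from hptiop.h, and
includes are omitted). The cmd argument of ioctl() is not examined by
the driver's ioctl routine; only the argument structure is interpreted:

    u32 id = 0;                    /* controller id, 0 for this IOP */
    struct hpt_controller_info con_info;
    u32 bytes_returned = 0;
    struct hpt_ioctl_u arg = { 0 };
    int fd = open("/dev/hptiop", O_RDWR);

    arg.magic          = HPT_IOCTL_MAGIC;
    arg.ioctl_code     = HPT_IOCTL_GET_CONTROLLER_INFO;
    arg.inbuf          = &id;
    arg.inbuf_size     = sizeof(id);
    arg.outbuf         = &con_info;
    arg.outbuf_size    = sizeof(con_info);
    arg.bytes_returned = &bytes_returned;

    ioctl(fd, 0, &arg);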
81 | |||
82 | |||
83 | ----------------------------------------------------------------------------- | ||
84 | Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved. | ||
85 | |||
86 | This file is distributed in the hope that it will be useful, | ||
87 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
88 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
89 | GNU General Public License for more details. | ||
90 | |||
91 | linux@highpoint-tech.com | ||
92 | http://www.highpoint-tech.com | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 6d3c401ccdb6..db647bf8830b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1117,6 +1117,12 @@ L: linux-hams@vger.kernel.org | |||
1117 | W: http://www.nt.tuwien.ac.at/~kkudielk/Linux/ | 1117 | W: http://www.nt.tuwien.ac.at/~kkudielk/Linux/ |
1118 | S: Maintained | 1118 | S: Maintained |
1119 | 1119 | ||
1120 | HIGHPOINT ROCKETRAID 3xxx RAID DRIVER | ||
1121 | P: HighPoint Linux Team | ||
1122 | M: linux@highpoint-tech.com | ||
1123 | W: http://www.highpoint-tech.com | ||
1124 | S: Supported | ||
1125 | |||
1120 | HIPPI | 1126 | HIPPI |
1121 | P: Jes Sorensen | 1127 | P: Jes Sorensen |
1122 | M: jes@trained-monkey.org | 1128 | M: jes@trained-monkey.org |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 3e7302692dbe..13ad88a064b7 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -530,6 +530,16 @@ config SCSI_PDC_ADMA | |||
530 | 530 | ||
531 | If unsure, say N. | 531 | If unsure, say N. |
532 | 532 | ||
533 | config SCSI_HPTIOP | ||
534 | tristate "HighPoint RocketRAID 3xxx Controller support" | ||
535 | depends on SCSI && PCI | ||
536 | help | ||
537 | This option enables support for HighPoint RocketRAID 3xxx | ||
538 | controllers. | ||
539 | |||
540 | To compile this driver as a module, choose M here; the module | ||
541 | will be called hptiop. If unsure, say N. | ||
542 | |||
533 | config SCSI_SATA_QSTOR | 543 | config SCSI_SATA_QSTOR |
534 | tristate "Pacific Digital SATA QStor support" | 544 | tristate "Pacific Digital SATA QStor support" |
535 | depends on SCSI_SATA && PCI | 545 | depends on SCSI_SATA && PCI |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3ce80ab4824a..9ae4361e352c 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -136,6 +136,7 @@ obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o | |||
136 | obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o | 136 | obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o |
137 | obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o | 137 | obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o |
138 | obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o | 138 | obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o |
139 | obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o | ||
139 | 140 | ||
140 | obj-$(CONFIG_ARM) += arm/ | 141 | obj-$(CONFIG_ARM) += arm/ |
141 | 142 | ||
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c new file mode 100644 index 000000000000..8302f3ba31ce --- /dev/null +++ b/drivers/scsi/hptiop.c | |||
@@ -0,0 +1,1501 @@ | |||
1 | /* | ||
2 | * HighPoint RR3xxx controller driver for Linux | ||
3 | * Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * Please report bugs/comments/suggestions to linux@highpoint-tech.com | ||
15 | * | ||
16 | * For more information, visit http://www.highpoint-tech.com | ||
17 | */ | ||
18 | #include <linux/config.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/timer.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/hdreg.h> | ||
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/io.h> | ||
32 | #include <asm/div64.h> | ||
33 | #include <scsi/scsi_cmnd.h> | ||
34 | #include <scsi/scsi_device.h> | ||
35 | #include <scsi/scsi.h> | ||
36 | #include <scsi/scsi_tcq.h> | ||
37 | #include <scsi/scsi_host.h> | ||
38 | |||
39 | #include "hptiop.h" | ||
40 | |||
41 | MODULE_AUTHOR("HighPoint Technologies, Inc."); | ||
42 | MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver"); | ||
43 | |||
44 | static char driver_name[] = "hptiop"; | ||
45 | static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; | ||
46 | static const char driver_ver[] = "v1.0 (060426)"; | ||
47 | |||
48 | static DEFINE_SPINLOCK(hptiop_hba_list_lock); | ||
49 | static LIST_HEAD(hptiop_hba_list); | ||
50 | static int hptiop_cdev_major = -1; | ||
51 | |||
52 | static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); | ||
53 | static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); | ||
54 | static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); | ||
55 | |||
56 | static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop) | ||
57 | { | ||
58 | readl(&iop->outbound_intstatus); | ||
59 | } | ||
60 | |||
61 | static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec) | ||
62 | { | ||
63 | u32 req = 0; | ||
64 | int i; | ||
65 | |||
66 | for (i = 0; i < millisec; i++) { | ||
67 | req = readl(&iop->inbound_queue); | ||
68 | if (req != IOPMU_QUEUE_EMPTY) | ||
69 | break; | ||
70 | msleep(1); | ||
71 | } | ||
72 | |||
73 | if (req != IOPMU_QUEUE_EMPTY) { | ||
74 | writel(req, &iop->outbound_queue); | ||
75 | hptiop_pci_posting_flush(iop); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | return -1; | ||
80 | } | ||
81 | |||
82 | static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag) | ||
83 | { | ||
84 | if ((tag & IOPMU_QUEUE_MASK_HOST_BITS) == IOPMU_QUEUE_ADDR_HOST_BIT) | ||
85 | return hptiop_host_request_callback(hba, | ||
86 | tag & ~IOPMU_QUEUE_ADDR_HOST_BIT); | ||
87 | else | ||
88 | return hptiop_iop_request_callback(hba, tag); | ||
89 | } | ||
90 | |||
91 | static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba) | ||
92 | { | ||
93 | u32 req; | ||
94 | |||
95 | while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) { | ||
96 | |||
97 | if (req & IOPMU_QUEUE_MASK_HOST_BITS) | ||
98 | hptiop_request_callback(hba, req); | ||
99 | else { | ||
100 | struct hpt_iop_request_header __iomem * p; | ||
101 | |||
102 | p = (struct hpt_iop_request_header __iomem *) | ||
103 | ((char __iomem *)hba->iop + req); | ||
104 | |||
105 | if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) { | ||
106 | if (readl(&p->context)) | ||
107 | hptiop_request_callback(hba, req); | ||
108 | else | ||
109 | writel(1, &p->context); | ||
110 | } | ||
111 | else | ||
112 | hptiop_request_callback(hba, req); | ||
113 | } | ||
114 | } | ||
115 | } | ||
116 | |||
117 | static int __iop_intr(struct hptiop_hba *hba) | ||
118 | { | ||
119 | struct hpt_iopmu __iomem *iop = hba->iop; | ||
120 | u32 status; | ||
121 | int ret = 0; | ||
122 | |||
123 | status = readl(&iop->outbound_intstatus); | ||
124 | |||
125 | if (status & IOPMU_OUTBOUND_INT_MSG0) { | ||
126 | u32 msg = readl(&iop->outbound_msgaddr0); | ||
127 | dprintk("received outbound msg %x\n", msg); | ||
128 | writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus); | ||
129 | hptiop_message_callback(hba, msg); | ||
130 | ret = 1; | ||
131 | } | ||
132 | |||
133 | if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { | ||
134 | hptiop_drain_outbound_queue(hba); | ||
135 | ret = 1; | ||
136 | } | ||
137 | |||
138 | return ret; | ||
139 | } | ||
140 | |||
141 | static int iop_send_sync_request(struct hptiop_hba *hba, | ||
142 | void __iomem *_req, u32 millisec) | ||
143 | { | ||
144 | struct hpt_iop_request_header __iomem *req = _req; | ||
145 | u32 i; | ||
146 | |||
147 | writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, | ||
148 | &req->flags); | ||
149 | |||
150 | writel(0, &req->context); | ||
151 | |||
152 | writel((unsigned long)req - (unsigned long)hba->iop, | ||
153 | &hba->iop->inbound_queue); | ||
154 | |||
155 | hptiop_pci_posting_flush(hba->iop); | ||
156 | |||
157 | for (i = 0; i < millisec; i++) { | ||
158 | __iop_intr(hba); | ||
159 | if (readl(&req->context)) | ||
160 | return 0; | ||
161 | msleep(1); | ||
162 | } | ||
163 | |||
164 | return -1; | ||
165 | } | ||
166 | |||
167 | static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec) | ||
168 | { | ||
169 | u32 i; | ||
170 | |||
171 | hba->msg_done = 0; | ||
172 | |||
173 | writel(msg, &hba->iop->inbound_msgaddr0); | ||
174 | |||
175 | hptiop_pci_posting_flush(hba->iop); | ||
176 | |||
177 | for (i = 0; i < millisec; i++) { | ||
178 | spin_lock_irq(hba->host->host_lock); | ||
179 | __iop_intr(hba); | ||
180 | spin_unlock_irq(hba->host->host_lock); | ||
181 | if (hba->msg_done) | ||
182 | break; | ||
183 | msleep(1); | ||
184 | } | ||
185 | |||
186 | return hba->msg_done? 0 : -1; | ||
187 | } | ||
188 | |||
189 | static int iop_get_config(struct hptiop_hba *hba, | ||
190 | struct hpt_iop_request_get_config *config) | ||
191 | { | ||
192 | u32 req32; | ||
193 | struct hpt_iop_request_get_config __iomem *req; | ||
194 | |||
195 | req32 = readl(&hba->iop->inbound_queue); | ||
196 | if (req32 == IOPMU_QUEUE_EMPTY) | ||
197 | return -1; | ||
198 | |||
199 | req = (struct hpt_iop_request_get_config __iomem *) | ||
200 | ((unsigned long)hba->iop + req32); | ||
201 | |||
202 | writel(0, &req->header.flags); | ||
203 | writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type); | ||
204 | writel(sizeof(struct hpt_iop_request_get_config), &req->header.size); | ||
205 | writel(IOP_RESULT_PENDING, &req->header.result); | ||
206 | |||
207 | if (iop_send_sync_request(hba, req, 20000)) { | ||
208 | dprintk("Get config send cmd failed\n"); | ||
209 | return -1; | ||
210 | } | ||
211 | |||
212 | memcpy_fromio(config, req, sizeof(*config)); | ||
213 | writel(req32, &hba->iop->outbound_queue); | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static int iop_set_config(struct hptiop_hba *hba, | ||
218 | struct hpt_iop_request_set_config *config) | ||
219 | { | ||
220 | u32 req32; | ||
221 | struct hpt_iop_request_set_config __iomem *req; | ||
222 | |||
223 | req32 = readl(&hba->iop->inbound_queue); | ||
224 | if (req32 == IOPMU_QUEUE_EMPTY) | ||
225 | return -1; | ||
226 | |||
227 | req = (struct hpt_iop_request_set_config __iomem *) | ||
228 | ((unsigned long)hba->iop + req32); | ||
229 | |||
230 | memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header), | ||
231 | (u8 *)config + sizeof(struct hpt_iop_request_header), | ||
232 | sizeof(struct hpt_iop_request_set_config) - | ||
233 | sizeof(struct hpt_iop_request_header)); | ||
234 | |||
235 | writel(0, &req->header.flags); | ||
236 | writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type); | ||
237 | writel(sizeof(struct hpt_iop_request_set_config), &req->header.size); | ||
238 | writel(IOP_RESULT_PENDING, &req->header.result); | ||
239 | |||
240 | if (iop_send_sync_request(hba, req, 20000)) { | ||
241 | dprintk("Set config send cmd failed\n"); | ||
242 | return -1; | ||
243 | } | ||
244 | |||
245 | writel(req32, &hba->iop->outbound_queue); | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static int hptiop_initialize_iop(struct hptiop_hba *hba) | ||
250 | { | ||
251 | struct hpt_iopmu __iomem *iop = hba->iop; | ||
252 | |||
253 | /* enable interrupts */ | ||
254 | writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0), | ||
255 | &iop->outbound_intmask); | ||
256 | |||
257 | hba->initialized = 1; | ||
258 | |||
259 | /* start background tasks */ | ||
260 | if (iop_send_sync_msg(hba, | ||
261 | IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { | ||
262 | printk(KERN_ERR "scsi%d: fail to start background task\n", | ||
263 | hba->host->host_no); | ||
264 | return -1; | ||
265 | } | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int hptiop_map_pci_bar(struct hptiop_hba *hba) | ||
270 | { | ||
271 | u32 mem_base_phy, length; | ||
272 | void __iomem *mem_base_virt; | ||
273 | struct pci_dev *pcidev = hba->pcidev; | ||
274 | |||
275 | if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) { | ||
276 | printk(KERN_ERR "scsi%d: pci resource invalid\n", | ||
277 | hba->host->host_no); | ||
278 | return -1; | ||
279 | } | ||
280 | |||
281 | mem_base_phy = pci_resource_start(pcidev, 0); | ||
282 | length = pci_resource_len(pcidev, 0); | ||
283 | mem_base_virt = ioremap(mem_base_phy, length); | ||
284 | |||
285 | if (!mem_base_virt) { | ||
286 | printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n", | ||
287 | hba->host->host_no); | ||
288 | return -1; | ||
289 | } | ||
290 | |||
291 | hba->iop = mem_base_virt; | ||
292 | dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop); | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg) | ||
297 | { | ||
298 | dprintk("iop message 0x%x\n", msg); | ||
299 | |||
300 | if (!hba->initialized) | ||
301 | return; | ||
302 | |||
303 | if (msg == IOPMU_INBOUND_MSG0_RESET) { | ||
304 | atomic_set(&hba->resetting, 0); | ||
305 | wake_up(&hba->reset_wq); | ||
306 | } | ||
307 | else if (msg <= IOPMU_INBOUND_MSG0_MAX) | ||
308 | hba->msg_done = 1; | ||
309 | } | ||
310 | |||
311 | static inline struct hptiop_request *get_req(struct hptiop_hba *hba) | ||
312 | { | ||
313 | struct hptiop_request *ret; | ||
314 | |||
315 | dprintk("get_req : req=%p\n", hba->req_list); | ||
316 | |||
317 | ret = hba->req_list; | ||
318 | if (ret) | ||
319 | hba->req_list = ret->next; | ||
320 | |||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req) | ||
325 | { | ||
326 | dprintk("free_req(%d, %p)\n", req->index, req); | ||
327 | req->next = hba->req_list; | ||
328 | hba->req_list = req; | ||
329 | } | ||
330 | |||
331 | static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag) | ||
332 | { | ||
333 | struct hpt_iop_request_scsi_command *req; | ||
334 | struct scsi_cmnd *scp; | ||
335 | |||
336 | req = (struct hpt_iop_request_scsi_command *)hba->reqs[tag].req_virt; | ||
337 | dprintk("hptiop_host_request_callback: req=%p, type=%d, " | ||
338 | "result=%d, context=0x%x tag=%d\n", | ||
339 | req, req->header.type, req->header.result, | ||
340 | req->header.context, tag); | ||
341 | |||
342 | BUG_ON(!req->header.result); | ||
343 | BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND)); | ||
344 | |||
345 | scp = hba->reqs[tag].scp; | ||
346 | |||
347 | if (HPT_SCP(scp)->mapped) { | ||
348 | if (scp->use_sg) | ||
349 | pci_unmap_sg(hba->pcidev, | ||
350 | (struct scatterlist *)scp->request_buffer, | ||
351 | scp->use_sg, | ||
352 | scp->sc_data_direction | ||
353 | ); | ||
354 | else | ||
355 | pci_unmap_single(hba->pcidev, | ||
356 | HPT_SCP(scp)->dma_handle, | ||
357 | scp->request_bufflen, | ||
358 | scp->sc_data_direction | ||
359 | ); | ||
360 | } | ||
361 | |||
362 | switch (le32_to_cpu(req->header.result)) { | ||
363 | case IOP_RESULT_SUCCESS: | ||
364 | scp->result = (DID_OK<<16); | ||
365 | break; | ||
366 | case IOP_RESULT_BAD_TARGET: | ||
367 | scp->result = (DID_BAD_TARGET<<16); | ||
368 | break; | ||
369 | case IOP_RESULT_BUSY: | ||
370 | scp->result = (DID_BUS_BUSY<<16); | ||
371 | break; | ||
372 | case IOP_RESULT_RESET: | ||
373 | scp->result = (DID_RESET<<16); | ||
374 | break; | ||
375 | case IOP_RESULT_FAIL: | ||
376 | scp->result = (DID_ERROR<<16); | ||
377 | break; | ||
378 | case IOP_RESULT_INVALID_REQUEST: | ||
379 | scp->result = (DID_ABORT<<16); | ||
380 | break; | ||
381 | case IOP_RESULT_MODE_SENSE_CHECK_CONDITION: | ||
382 | scp->result = SAM_STAT_CHECK_CONDITION; | ||
383 | memset(&scp->sense_buffer, | ||
384 | 0, sizeof(scp->sense_buffer)); | ||
385 | memcpy(&scp->sense_buffer, | ||
386 | &req->sg_list, le32_to_cpu(req->dataxfer_length)); | ||
387 | break; | ||
388 | |||
389 | default: | ||
390 | scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) | | ||
391 | (DID_ABORT<<16); | ||
392 | break; | ||
393 | } | ||
394 | |||
395 | dprintk("scsi_done(%p)\n", scp); | ||
396 | scp->scsi_done(scp); | ||
397 | free_req(hba, &hba->reqs[tag]); | ||
398 | } | ||
399 | |||
400 | static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag) | ||
401 | { | ||
402 | struct hpt_iop_request_header __iomem *req; | ||
403 | struct hpt_iop_request_ioctl_command __iomem *p; | ||
404 | struct hpt_ioctl_k *arg; | ||
405 | |||
406 | req = (struct hpt_iop_request_header __iomem *) | ||
407 | ((unsigned long)hba->iop + tag); | ||
408 | dprintk("hptiop_iop_request_callback: req=%p, type=%d, " | ||
409 | "result=%d, context=0x%x tag=%d\n", | ||
410 | req, readl(&req->type), readl(&req->result), | ||
411 | readl(&req->context), tag); | ||
412 | |||
413 | BUG_ON(!readl(&req->result)); | ||
414 | BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND); | ||
415 | |||
416 | p = (struct hpt_iop_request_ioctl_command __iomem *)req; | ||
417 | arg = (struct hpt_ioctl_k *)(unsigned long) | ||
418 | (readl(&req->context) | | ||
419 | ((u64)readl(&req->context_hi32)<<32)); | ||
420 | |||
421 | if (readl(&req->result) == IOP_RESULT_SUCCESS) { | ||
422 | arg->result = HPT_IOCTL_RESULT_OK; | ||
423 | |||
424 | if (arg->outbuf_size) | ||
425 | memcpy_fromio(arg->outbuf, | ||
426 | &p->buf[(readl(&p->inbuf_size) + 3) & ~3], | ||
427 | arg->outbuf_size); | ||
428 | |||
429 | if (arg->bytes_returned) | ||
430 | *arg->bytes_returned = arg->outbuf_size; | ||
431 | } | ||
432 | else | ||
433 | arg->result = HPT_IOCTL_RESULT_FAILED; | ||
434 | |||
435 | arg->done(arg); | ||
436 | writel(tag, &hba->iop->outbound_queue); | ||
437 | } | ||
438 | |||
439 | static irqreturn_t hptiop_intr(int irq, void *dev_id, struct pt_regs *regs) | ||
440 | { | ||
441 | struct hptiop_hba *hba = dev_id; | ||
442 | int handled; | ||
443 | unsigned long flags; | ||
444 | |||
445 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
446 | handled = __iop_intr(hba); | ||
447 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
448 | |||
449 | return IRQ_RETVAL(handled); | ||
450 | } | ||
451 | |||
452 | static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg) | ||
453 | { | ||
454 | struct Scsi_Host *host = scp->device->host; | ||
455 | struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; | ||
456 | struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer; | ||
457 | |||
458 | /* | ||
459 | * the midlayer should no longer send non-use_sg requests, | ||
460 | * but keep the use_sg check anyway | ||
461 | */ | ||
462 | if (scp->use_sg) { | ||
463 | int idx; | ||
464 | |||
465 | HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev, | ||
466 | sglist, scp->use_sg, | ||
467 | scp->sc_data_direction); | ||
468 | HPT_SCP(scp)->mapped = 1; | ||
469 | BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors); | ||
470 | |||
471 | for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) { | ||
472 | psg[idx].pci_address = | ||
473 | cpu_to_le64(sg_dma_address(&sglist[idx])); | ||
474 | psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx])); | ||
475 | psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ? | ||
476 | cpu_to_le32(1) : 0; | ||
477 | } | ||
478 | |||
479 | return HPT_SCP(scp)->sgcnt; | ||
480 | } else { | ||
481 | HPT_SCP(scp)->dma_handle = pci_map_single( | ||
482 | hba->pcidev, | ||
483 | scp->request_buffer, | ||
484 | scp->request_bufflen, | ||
485 | scp->sc_data_direction | ||
486 | ); | ||
487 | HPT_SCP(scp)->mapped = 1; | ||
488 | psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle); | ||
489 | psg->size = cpu_to_le32(scp->request_bufflen); | ||
490 | psg->eot = cpu_to_le32(1); | ||
491 | return 1; | ||
492 | } | ||
493 | } | ||
494 | |||
495 | static int hptiop_queuecommand(struct scsi_cmnd *scp, | ||
496 | void (*done)(struct scsi_cmnd *)) | ||
497 | { | ||
498 | struct Scsi_Host *host = scp->device->host; | ||
499 | struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; | ||
500 | struct hpt_iop_request_scsi_command *req; | ||
501 | int sg_count = 0; | ||
502 | struct hptiop_request *_req; | ||
503 | |||
504 | BUG_ON(!done); | ||
505 | scp->scsi_done = done; | ||
506 | |||
507 | /* | ||
508 | * hptiop_shutdown will flush the controller cache. | ||
509 | */ | ||
510 | if (scp->cmnd[0] == SYNCHRONIZE_CACHE) { | ||
511 | scp->result = DID_OK<<16; | ||
512 | goto cmd_done; | ||
513 | } | ||
514 | |||
515 | _req = get_req(hba); | ||
516 | if (_req == NULL) { | ||
517 | dprintk("hptiop_queuecmd : no free req\n"); | ||
518 | scp->result = DID_BUS_BUSY << 16; | ||
519 | goto cmd_done; | ||
520 | } | ||
521 | |||
522 | _req->scp = scp; | ||
523 | |||
524 | dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) " | ||
525 | "req_index=%d, req=%p\n", | ||
526 | scp, | ||
527 | host->host_no, scp->device->channel, | ||
528 | scp->device->id, scp->device->lun, | ||
529 | *((u32 *)&scp->cmnd), | ||
530 | *((u32 *)&scp->cmnd + 1), | ||
531 | *((u32 *)&scp->cmnd + 2), | ||
532 | _req->index, _req->req_virt); | ||
533 | |||
534 | scp->result = 0; | ||
535 | |||
536 | if (scp->device->channel || scp->device->lun || | ||
537 | scp->device->id > hba->max_devices) { | ||
538 | scp->result = DID_BAD_TARGET << 16; | ||
539 | free_req(hba, _req); | ||
540 | goto cmd_done; | ||
541 | } | ||
542 | |||
543 | req = (struct hpt_iop_request_scsi_command *)_req->req_virt; | ||
544 | |||
545 | /* build S/G table */ | ||
546 | if (scp->request_bufflen) | ||
547 | sg_count = hptiop_buildsgl(scp, req->sg_list); | ||
548 | else | ||
549 | HPT_SCP(scp)->mapped = 0; | ||
550 | |||
551 | req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); | ||
552 | req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND); | ||
553 | req->header.result = cpu_to_le32(IOP_RESULT_PENDING); | ||
554 | req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT | | ||
555 | (u32)_req->index); | ||
556 | req->header.context_hi32 = 0; | ||
557 | req->dataxfer_length = cpu_to_le32(scp->request_bufflen); | ||
558 | req->channel = scp->device->channel; | ||
559 | req->target = scp->device->id; | ||
560 | req->lun = scp->device->lun; | ||
561 | req->header.size = cpu_to_le32( | ||
562 | sizeof(struct hpt_iop_request_scsi_command) | ||
563 | - sizeof(struct hpt_iopsg) | ||
564 | + sg_count * sizeof(struct hpt_iopsg)); | ||
565 | |||
566 | memcpy(req->cdb, scp->cmnd, sizeof(req->cdb)); | ||
567 | |||
568 | writel(IOPMU_QUEUE_ADDR_HOST_BIT | _req->req_shifted_phy, | ||
569 | &hba->iop->inbound_queue); | ||
570 | |||
571 | return 0; | ||
572 | |||
573 | cmd_done: | ||
574 | dprintk("scsi_done(scp=%p)\n", scp); | ||
575 | scp->scsi_done(scp); | ||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | static const char *hptiop_info(struct Scsi_Host *host) | ||
580 | { | ||
581 | return driver_name_long; | ||
582 | } | ||
583 | |||
584 | static int hptiop_reset_hba(struct hptiop_hba *hba) | ||
585 | { | ||
586 | if (atomic_xchg(&hba->resetting, 1) == 0) { | ||
587 | atomic_inc(&hba->reset_count); | ||
588 | writel(IOPMU_INBOUND_MSG0_RESET, | ||
589 | &hba->iop->outbound_msgaddr0); | ||
590 | hptiop_pci_posting_flush(hba->iop); | ||
591 | } | ||
592 | |||
593 | wait_event_timeout(hba->reset_wq, | ||
594 | atomic_read(&hba->resetting) == 0, 60 * HZ); | ||
595 | |||
596 | if (atomic_read(&hba->resetting)) { | ||
597 | /* IOP is in unknown state, abort reset */ | ||
598 | printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); | ||
599 | return -1; | ||
600 | } | ||
601 | |||
602 | if (iop_send_sync_msg(hba, | ||
603 | IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { | ||
604 | dprintk("scsi%d: fail to start background task\n", | ||
605 | hba->host->host_no); | ||
606 | } | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static int hptiop_reset(struct scsi_cmnd *scp) | ||
612 | { | ||
613 | struct Scsi_Host * host = scp->device->host; | ||
614 | struct hptiop_hba * hba = (struct hptiop_hba *)host->hostdata; | ||
615 | |||
616 | printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n", | ||
617 | scp->device->host->host_no, scp->device->channel, | ||
618 | scp->device->id, scp); | ||
619 | |||
620 | return hptiop_reset_hba(hba)? FAILED : SUCCESS; | ||
621 | } | ||
622 | |||
623 | static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, | ||
624 | int queue_depth) | ||
625 | { | ||
626 | if (queue_depth > 256) | ||
627 | queue_depth = 256; | ||
628 | scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); | ||
629 | return queue_depth; | ||
630 | } | ||
631 | |||
632 | struct hptiop_getinfo { | ||
633 | char __user *buffer; | ||
634 | loff_t buflength; | ||
635 | loff_t bufoffset; | ||
636 | loff_t buffillen; | ||
637 | loff_t filpos; | ||
638 | }; | ||
639 | |||
640 | static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo, | ||
641 | char *data, int datalen) | ||
642 | { | ||
643 | if (pinfo->filpos < pinfo->bufoffset) { | ||
644 | if (pinfo->filpos + datalen <= pinfo->bufoffset) { | ||
645 | pinfo->filpos += datalen; | ||
646 | return; | ||
647 | } else { | ||
648 | data += (pinfo->bufoffset - pinfo->filpos); | ||
649 | datalen -= (pinfo->bufoffset - pinfo->filpos); | ||
650 | pinfo->filpos = pinfo->bufoffset; | ||
651 | } | ||
652 | } | ||
653 | |||
654 | pinfo->filpos += datalen; | ||
655 | if (pinfo->buffillen == pinfo->buflength) | ||
656 | return; | ||
657 | |||
658 | if (pinfo->buflength - pinfo->buffillen < datalen) | ||
659 | datalen = pinfo->buflength - pinfo->buffillen; | ||
660 | |||
661 | if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen)) | ||
662 | return; | ||
663 | |||
664 | pinfo->buffillen += datalen; | ||
665 | } | ||
666 | |||
667 | static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...) | ||
668 | { | ||
669 | va_list args; | ||
670 | char buf[128]; | ||
671 | int len; | ||
672 | |||
673 | va_start(args, fmt); | ||
674 | len = vsnprintf(buf, sizeof(buf), fmt, args); | ||
675 | va_end(args); | ||
676 | hptiop_copy_mem_info(pinfo, buf, len); | ||
677 | return len; | ||
678 | } | ||
679 | |||
680 | static void hptiop_ioctl_done(struct hpt_ioctl_k *arg) | ||
681 | { | ||
682 | arg->done = NULL; | ||
683 | wake_up(&arg->hba->ioctl_wq); | ||
684 | } | ||
685 | |||
686 | static void hptiop_do_ioctl(struct hpt_ioctl_k *arg) | ||
687 | { | ||
688 | struct hptiop_hba *hba = arg->hba; | ||
689 | u32 val; | ||
690 | struct hpt_iop_request_ioctl_command __iomem *req; | ||
691 | int ioctl_retry = 0; | ||
692 | |||
693 | dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no); | ||
694 | |||
695 | /* | ||
696 | * check (in + out) buff size from application. | ||
697 | * outbuf must be dword aligned. | ||
698 | */ | ||
699 | if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size > | ||
700 | hba->max_request_size | ||
701 | - sizeof(struct hpt_iop_request_header) | ||
702 | - 4 * sizeof(u32)) { | ||
703 | dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n", | ||
704 | hba->host->host_no, | ||
705 | arg->inbuf_size, arg->outbuf_size); | ||
706 | arg->result = HPT_IOCTL_RESULT_FAILED; | ||
707 | return; | ||
708 | } | ||
709 | |||
710 | retry: | ||
711 | spin_lock_irq(hba->host->host_lock); | ||
712 | |||
713 | val = readl(&hba->iop->inbound_queue); | ||
714 | if (val == IOPMU_QUEUE_EMPTY) { | ||
715 | spin_unlock_irq(hba->host->host_lock); | ||
716 | dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no); | ||
717 | arg->result = -1; | ||
718 | return; | ||
719 | } | ||
720 | |||
721 | req = (struct hpt_iop_request_ioctl_command __iomem *) | ||
722 | ((unsigned long)hba->iop + val); | ||
723 | |||
724 | writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code), | ||
725 | &req->ioctl_code); | ||
726 | writel(arg->inbuf_size, &req->inbuf_size); | ||
727 | writel(arg->outbuf_size, &req->outbuf_size); | ||
728 | |||
729 | /* | ||
730 | * use the buffer on the IOP local memory first, then copy it | ||
731 | * back to host. | ||
732 | * the caller's request buffer should be little-endian. | ||
733 | */ | ||
734 | if (arg->inbuf_size) | ||
735 | memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size); | ||
736 | |||
737 | /* correct the controller ID for IOP */ | ||
738 | if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO || | ||
739 | arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 || | ||
740 | arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO) | ||
741 | && arg->inbuf_size >= sizeof(u32)) | ||
742 | writel(0, req->buf); | ||
743 | |||
744 | writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type); | ||
745 | writel(0, &req->header.flags); | ||
746 | writel(offsetof(struct hpt_iop_request_ioctl_command, buf) | ||
747 | + arg->inbuf_size, &req->header.size); | ||
748 | writel((u32)(unsigned long)arg, &req->header.context); | ||
749 | writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0, | ||
750 | &req->header.context_hi32); | ||
751 | writel(IOP_RESULT_PENDING, &req->header.result); | ||
752 | |||
753 | arg->result = HPT_IOCTL_RESULT_FAILED; | ||
754 | arg->done = hptiop_ioctl_done; | ||
755 | |||
756 | writel(val, &hba->iop->inbound_queue); | ||
757 | hptiop_pci_posting_flush(hba->iop); | ||
758 | |||
759 | spin_unlock_irq(hba->host->host_lock); | ||
760 | |||
761 | wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ); | ||
762 | |||
763 | if (arg->done != NULL) { | ||
764 | hptiop_reset_hba(hba); | ||
765 | if (ioctl_retry++ < 3) | ||
766 | goto retry; | ||
767 | } | ||
768 | |||
769 | dprintk("hpt_iop_ioctl %x result %d\n", | ||
770 | arg->ioctl_code, arg->result); | ||
771 | } | ||
772 | |||
773 | static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf, | ||
774 | u32 insize, void *outbuf, u32 outsize) | ||
775 | { | ||
776 | struct hpt_ioctl_k arg; | ||
777 | arg.hba = hba; | ||
778 | arg.ioctl_code = code; | ||
779 | arg.inbuf = inbuf; | ||
780 | arg.outbuf = outbuf; | ||
781 | arg.inbuf_size = insize; | ||
782 | arg.outbuf_size = outsize; | ||
783 | arg.bytes_returned = NULL; | ||
784 | hptiop_do_ioctl(&arg); | ||
785 | return arg.result; | ||
786 | } | ||
787 | |||
788 | static inline int hpt_id_valid(__le32 id) | ||
789 | { | ||
790 | return id != 0 && id != cpu_to_le32(0xffffffff); | ||
791 | } | ||
792 | |||
793 | static int hptiop_get_controller_info(struct hptiop_hba *hba, | ||
794 | struct hpt_controller_info *pinfo) | ||
795 | { | ||
796 | int id = 0; | ||
797 | |||
798 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO, | ||
799 | &id, sizeof(int), pinfo, sizeof(*pinfo)); | ||
800 | } | ||
801 | |||
802 | |||
803 | static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus, | ||
804 | struct hpt_channel_info *pinfo) | ||
805 | { | ||
806 | u32 ids[2]; | ||
807 | |||
808 | ids[0] = 0; | ||
809 | ids[1] = bus; | ||
810 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO, | ||
811 | ids, sizeof(ids), pinfo, sizeof(*pinfo)); | ||
812 | |||
813 | } | ||
814 | |||
815 | static int hptiop_get_logical_devices(struct hptiop_hba *hba, | ||
816 | __le32 *pids, int maxcount) | ||
817 | { | ||
818 | int i; | ||
819 | u32 count = maxcount - 1; | ||
820 | |||
821 | if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES, | ||
822 | &count, sizeof(u32), | ||
823 | pids, sizeof(u32) * maxcount)) | ||
824 | return -1; | ||
825 | |||
826 | maxcount = le32_to_cpu(pids[0]); | ||
827 | for (i = 0; i < maxcount; i++) | ||
828 | pids[i] = pids[i+1]; | ||
829 | |||
830 | return maxcount; | ||
831 | } | ||
832 | |||
833 | static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id, | ||
834 | struct hpt_logical_device_info_v3 *pinfo) | ||
835 | { | ||
836 | return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3, | ||
837 | &id, sizeof(u32), | ||
838 | pinfo, sizeof(*pinfo)); | ||
839 | } | ||
840 | |||
841 | static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo) | ||
842 | { | ||
843 | static char s[64]; | ||
844 | u32 flags = le32_to_cpu(devinfo->u.array.flags); | ||
845 | u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress); | ||
846 | u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress); | ||
847 | |||
848 | if (flags & ARRAY_FLAG_DISABLED) | ||
849 | return "Disabled"; | ||
850 | else if (flags & ARRAY_FLAG_TRANSFORMING) | ||
851 | sprintf(s, "Expanding/Migrating %d.%d%%%s%s", | ||
852 | trans_prog / 100, | ||
853 | trans_prog % 100, | ||
854 | (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))? | ||
855 | ", Critical" : "", | ||
856 | ((flags & ARRAY_FLAG_NEEDINITIALIZING) && | ||
857 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
858 | !(flags & ARRAY_FLAG_INITIALIZING))? | ||
859 | ", Unintialized" : ""); | ||
860 | else if ((flags & ARRAY_FLAG_BROKEN) && | ||
861 | devinfo->u.array.array_type != AT_RAID6) | ||
862 | return "Critical"; | ||
863 | else if (flags & ARRAY_FLAG_REBUILDING) | ||
864 | sprintf(s, | ||
865 | (flags & ARRAY_FLAG_NEEDINITIALIZING)? | ||
866 | "%sBackground initializing %d.%d%%" : | ||
867 | "%sRebuilding %d.%d%%", | ||
868 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
869 | reb_prog / 100, | ||
870 | reb_prog % 100); | ||
871 | else if (flags & ARRAY_FLAG_VERIFYING) | ||
872 | sprintf(s, "%sVerifying %d.%d%%", | ||
873 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
874 | reb_prog / 100, | ||
875 | reb_prog % 100); | ||
876 | else if (flags & ARRAY_FLAG_INITIALIZING) | ||
877 | sprintf(s, "%sForground initializing %d.%d%%", | ||
878 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
879 | reb_prog / 100, | ||
880 | reb_prog % 100); | ||
881 | else if (flags & ARRAY_FLAG_NEEDTRANSFORM) | ||
882 | sprintf(s,"%s%s%s", "Need Expanding/Migrating", | ||
883 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "", | ||
884 | ((flags & ARRAY_FLAG_NEEDINITIALIZING) && | ||
885 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
886 | !(flags & ARRAY_FLAG_INITIALIZING))? | ||
887 | ", Unintialized" : ""); | ||
888 | else if (flags & ARRAY_FLAG_NEEDINITIALIZING && | ||
889 | !(flags & ARRAY_FLAG_REBUILDING) && | ||
890 | !(flags & ARRAY_FLAG_INITIALIZING)) | ||
891 | sprintf(s,"%sUninitialized", | ||
892 | (flags & ARRAY_FLAG_BROKEN)? "Critical, " : ""); | ||
893 | else if ((flags & ARRAY_FLAG_NEEDBUILDING) || | ||
894 | (flags & ARRAY_FLAG_BROKEN)) | ||
895 | return "Critical"; | ||
896 | else | ||
897 | return "Normal"; | ||
898 | return s; | ||
899 | } | ||
900 | |||
901 | static void hptiop_dump_devinfo(struct hptiop_hba *hba, | ||
902 | struct hptiop_getinfo *pinfo, __le32 id, int indent) | ||
903 | { | ||
904 | struct hpt_logical_device_info_v3 devinfo; | ||
905 | int i; | ||
906 | u64 capacity; | ||
907 | |||
908 | for (i = 0; i < indent; i++) | ||
909 | hptiop_copy_info(pinfo, "\t"); | ||
910 | |||
911 | if (hptiop_get_device_info_v3(hba, id, &devinfo)) { | ||
912 | hptiop_copy_info(pinfo, "unknown\n"); | ||
913 | return; | ||
914 | } | ||
915 | |||
916 | switch (devinfo.type) { | ||
917 | |||
918 | case LDT_DEVICE: { | ||
919 | struct hd_driveid *driveid; | ||
920 | u32 flags = le32_to_cpu(devinfo.u.device.flags); | ||
921 | |||
922 | driveid = (struct hd_driveid *)devinfo.u.device.ident; | ||
923 | /* model[] is 40 chars long, but we just want 20 chars here */ | ||
924 | driveid->model[20] = 0; | ||
925 | |||
926 | if (indent) | ||
927 | if (flags & DEVICE_FLAG_DISABLED) | ||
928 | hptiop_copy_info(pinfo,"Missing\n"); | ||
929 | else | ||
930 | hptiop_copy_info(pinfo, "CH%d %s\n", | ||
931 | devinfo.u.device.path_id + 1, | ||
932 | driveid->model); | ||
933 | else { | ||
934 | capacity = le64_to_cpu(devinfo.capacity) * 512; | ||
935 | do_div(capacity, 1000000); | ||
936 | hptiop_copy_info(pinfo, | ||
937 | "CH%d %s, %lluMB, %s %s%s%s%s\n", | ||
938 | devinfo.u.device.path_id + 1, | ||
939 | driveid->model, | ||
940 | capacity, | ||
941 | (flags & DEVICE_FLAG_DISABLED)? | ||
942 | "Disabled" : "Normal", | ||
943 | devinfo.u.device.read_ahead_enabled? | ||
944 | "[RA]" : "", | ||
945 | devinfo.u.device.write_cache_enabled? | ||
946 | "[WC]" : "", | ||
947 | devinfo.u.device.TCQ_enabled? | ||
948 | "[TCQ]" : "", | ||
949 | devinfo.u.device.NCQ_enabled? | ||
950 | "[NCQ]" : "" | ||
951 | ); | ||
952 | } | ||
953 | break; | ||
954 | } | ||
955 | |||
956 | case LDT_ARRAY: | ||
957 | if (devinfo.target_id != INVALID_TARGET_ID) | ||
958 | hptiop_copy_info(pinfo, "[DISK %d_%d] ", | ||
959 | devinfo.vbus_id, devinfo.target_id); | ||
960 | |||
961 | capacity = le64_to_cpu(devinfo.capacity) * 512; | ||
962 | do_div(capacity, 1000000); | ||
963 | hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n", | ||
964 | devinfo.u.array.name, | ||
965 | devinfo.u.array.array_type==AT_RAID0? "RAID0" : | ||
966 | devinfo.u.array.array_type==AT_RAID1? "RAID1" : | ||
967 | devinfo.u.array.array_type==AT_RAID5? "RAID5" : | ||
968 | devinfo.u.array.array_type==AT_RAID6? "RAID6" : | ||
969 | devinfo.u.array.array_type==AT_JBOD? "JBOD" : | ||
970 | "unknown", | ||
971 | capacity, | ||
972 | get_array_status(&devinfo)); | ||
973 | for (i = 0; i < devinfo.u.array.ndisk; i++) { | ||
974 | if (hpt_id_valid(devinfo.u.array.members[i])) { | ||
975 | if (cpu_to_le16(1<<i) & | ||
976 | devinfo.u.array.critical_members) | ||
977 | hptiop_copy_info(pinfo, "\t*"); | ||
978 | hptiop_dump_devinfo(hba, pinfo, | ||
979 | devinfo.u.array.members[i], indent+1); | ||
980 | } | ||
981 | else | ||
982 | hptiop_copy_info(pinfo, "\tMissing\n"); | ||
983 | } | ||
984 | if (id == devinfo.u.array.transform_source) { | ||
985 | hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n"); | ||
986 | hptiop_dump_devinfo(hba, pinfo, | ||
987 | devinfo.u.array.transform_target, indent+1); | ||
988 | } | ||
989 | break; | ||
990 | } | ||
991 | } | ||
992 | |||
993 | static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) | ||
994 | { | ||
995 | return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); | ||
996 | } | ||
997 | |||
998 | static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf, | ||
999 | size_t count, loff_t *ppos) | ||
1000 | { | ||
1001 | struct hptiop_hba *hba = filp->private_data; | ||
1002 | struct hptiop_getinfo info; | ||
1003 | int i, j, ndev; | ||
1004 | struct hpt_controller_info con_info; | ||
1005 | struct hpt_channel_info chan_info; | ||
1006 | __le32 ids[32]; | ||
1007 | |||
1008 | info.buffer = buf; | ||
1009 | info.buflength = count; | ||
1010 | info.bufoffset = ppos ? *ppos : 0; | ||
1011 | info.filpos = 0; | ||
1012 | info.buffillen = 0; | ||
1013 | |||
1014 | if (hptiop_get_controller_info(hba, &con_info)) | ||
1015 | return -EIO; | ||
1016 | |||
1017 | for (i = 0; i < con_info.num_buses; i++) { | ||
1018 | if (hptiop_get_channel_info(hba, i, &chan_info) == 0) { | ||
1019 | if (hpt_id_valid(chan_info.devices[0])) | ||
1020 | hptiop_dump_devinfo(hba, &info, | ||
1021 | chan_info.devices[0], 0); | ||
1022 | if (hpt_id_valid(chan_info.devices[1])) | ||
1023 | hptiop_dump_devinfo(hba, &info, | ||
1024 | chan_info.devices[1], 0); | ||
1025 | } | ||
1026 | } | ||
1027 | |||
1028 | ndev = hptiop_get_logical_devices(hba, ids, | ||
1029 | sizeof(ids) / sizeof(ids[0])); | ||
1030 | |||
1031 | /* | ||
1032 | * if hptiop_get_logical_devices fails, ndev==-1 and it just | ||
1033 | * output nothing here | ||
1034 | */ | ||
1035 | for (j = 0; j < ndev; j++) | ||
1036 | hptiop_dump_devinfo(hba, &info, ids[j], 0); | ||
1037 | |||
1038 | if (ppos) | ||
1039 | *ppos += info.buffillen; | ||
1040 | |||
1041 | return info.buffillen; | ||
1042 | } | ||
1043 | |||
1044 | static int hptiop_cdev_ioctl(struct inode *inode, struct file *file, | ||
1045 | unsigned int cmd, unsigned long arg) | ||
1046 | { | ||
1047 | struct hptiop_hba *hba = file->private_data; | ||
1048 | struct hpt_ioctl_u ioctl_u; | ||
1049 | struct hpt_ioctl_k ioctl_k; | ||
1050 | u32 bytes_returned; | ||
1051 | int err = -EINVAL; | ||
1052 | |||
1053 | if (copy_from_user(&ioctl_u, | ||
1054 | (void __user *)arg, sizeof(struct hpt_ioctl_u))) | ||
1055 | return -EINVAL; | ||
1056 | |||
1057 | if (ioctl_u.magic != HPT_IOCTL_MAGIC) | ||
1058 | return -EINVAL; | ||
1059 | |||
1060 | ioctl_k.ioctl_code = ioctl_u.ioctl_code; | ||
1061 | ioctl_k.inbuf = NULL; | ||
1062 | ioctl_k.inbuf_size = ioctl_u.inbuf_size; | ||
1063 | ioctl_k.outbuf = NULL; | ||
1064 | ioctl_k.outbuf_size = ioctl_u.outbuf_size; | ||
1065 | ioctl_k.hba = hba; | ||
1066 | ioctl_k.bytes_returned = &bytes_returned; | ||
1067 | |||
1068 | /* verify user buffer */ | ||
1069 | if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ, | ||
1070 | ioctl_u.inbuf, ioctl_k.inbuf_size)) || | ||
1071 | (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE, | ||
1072 | ioctl_u.outbuf, ioctl_k.outbuf_size)) || | ||
1073 | (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE, | ||
1074 | ioctl_u.bytes_returned, sizeof(u32))) || | ||
1075 | ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) { | ||
1076 | |||
1077 | dprintk("scsi%d: got bad user address\n", hba->host->host_no); | ||
1078 | return -EINVAL; | ||
1079 | } | ||
1080 | |||
1081 | /* map buffer to kernel. */ | ||
1082 | if (ioctl_k.inbuf_size) { | ||
1083 | ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL); | ||
1084 | if (!ioctl_k.inbuf) { | ||
1085 | dprintk("scsi%d: fail to alloc inbuf\n", | ||
1086 | hba->host->host_no); | ||
1087 | err = -ENOMEM; | ||
1088 | goto err_exit; | ||
1089 | } | ||
1090 | |||
1091 | if (copy_from_user(ioctl_k.inbuf, | ||
1092 | ioctl_u.inbuf, ioctl_k.inbuf_size)) { | ||
1093 | goto err_exit; | ||
1094 | } | ||
1095 | } | ||
1096 | |||
1097 | if (ioctl_k.outbuf_size) { | ||
1098 | ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL); | ||
1099 | if (!ioctl_k.outbuf) { | ||
1100 | dprintk("scsi%d: fail to alloc outbuf\n", | ||
1101 | hba->host->host_no); | ||
1102 | err = -ENOMEM; | ||
1103 | goto err_exit; | ||
1104 | } | ||
1105 | } | ||
1106 | |||
1107 | hptiop_do_ioctl(&ioctl_k); | ||
1108 | |||
1109 | if (ioctl_k.result == HPT_IOCTL_RESULT_OK) { | ||
1110 | if (ioctl_k.outbuf_size && | ||
1111 | copy_to_user(ioctl_u.outbuf, | ||
1112 | ioctl_k.outbuf, ioctl_k.outbuf_size)) | ||
1113 | goto err_exit; | ||
1114 | |||
1115 | if (ioctl_u.bytes_returned && | ||
1116 | copy_to_user(ioctl_u.bytes_returned, | ||
1117 | &bytes_returned, sizeof(u32))) | ||
1118 | goto err_exit; | ||
1119 | |||
1120 | err = 0; | ||
1121 | } | ||
1122 | |||
1123 | err_exit: | ||
1124 | kfree(ioctl_k.inbuf); | ||
1125 | kfree(ioctl_k.outbuf); | ||
1126 | |||
1127 | return err; | ||
1128 | } | ||
1129 | |||
1130 | static int hptiop_cdev_open(struct inode *inode, struct file *file) | ||
1131 | { | ||
1132 | struct hptiop_hba *hba; | ||
1133 | unsigned i = 0, minor = iminor(inode); | ||
1134 | int ret = -ENODEV; | ||
1135 | |||
1136 | spin_lock(&hptiop_hba_list_lock); | ||
1137 | list_for_each_entry(hba, &hptiop_hba_list, link) { | ||
1138 | if (i == minor) { | ||
1139 | file->private_data = hba; | ||
1140 | ret = 0; | ||
1141 | goto out; | ||
1142 | } | ||
1143 | i++; | ||
1144 | } | ||
1145 | |||
1146 | out: | ||
1147 | spin_unlock(&hptiop_hba_list_lock); | ||
1148 | return ret; | ||
1149 | } | ||
1150 | |||
1151 | static struct file_operations hptiop_cdev_fops = { | ||
1152 | .owner = THIS_MODULE, | ||
1153 | .read = hptiop_cdev_read, | ||
1154 | .ioctl = hptiop_cdev_ioctl, | ||
1155 | .open = hptiop_cdev_open, | ||
1156 | }; | ||
1157 | |||
1158 | static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) | ||
1159 | { | ||
1160 | struct Scsi_Host *host = class_to_shost(class_dev); | ||
1161 | struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; | ||
1162 | |||
1163 | return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", | ||
1164 | hba->firmware_version >> 24, | ||
1165 | (hba->firmware_version >> 16) & 0xff, | ||
1166 | (hba->firmware_version >> 8) & 0xff, | ||
1167 | hba->firmware_version & 0xff); | ||
1168 | } | ||
1169 | |||
1170 | static struct class_device_attribute hptiop_attr_version = { | ||
1171 | .attr = { | ||
1172 | .name = "driver-version", | ||
1173 | .mode = S_IRUGO, | ||
1174 | }, | ||
1175 | .show = hptiop_show_version, | ||
1176 | }; | ||
1177 | |||
1178 | static struct class_device_attribute hptiop_attr_fw_version = { | ||
1179 | .attr = { | ||
1180 | .name = "firmware-version", | ||
1181 | .mode = S_IRUGO, | ||
1182 | }, | ||
1183 | .show = hptiop_show_fw_version, | ||
1184 | }; | ||
1185 | |||
1186 | static struct class_device_attribute *hptiop_attrs[] = { | ||
1187 | &hptiop_attr_version, | ||
1188 | &hptiop_attr_fw_version, | ||
1189 | NULL | ||
1190 | }; | ||
1191 | |||
1192 | static struct scsi_host_template driver_template = { | ||
1193 | .module = THIS_MODULE, | ||
1194 | .name = driver_name, | ||
1195 | .queuecommand = hptiop_queuecommand, | ||
1196 | .eh_device_reset_handler = hptiop_reset, | ||
1197 | .eh_bus_reset_handler = hptiop_reset, | ||
1198 | .info = hptiop_info, | ||
1199 | .unchecked_isa_dma = 0, | ||
1200 | .emulated = 0, | ||
1201 | .use_clustering = ENABLE_CLUSTERING, | ||
1202 | .proc_name = driver_name, | ||
1203 | .shost_attrs = hptiop_attrs, | ||
1204 | .this_id = -1, | ||
1205 | .change_queue_depth = hptiop_adjust_disk_queue_depth, | ||
1206 | }; | ||
1207 | |||
1208 | static int __devinit hptiop_probe(struct pci_dev *pcidev, | ||
1209 | const struct pci_device_id *id) | ||
1210 | { | ||
1211 | struct Scsi_Host *host = NULL; | ||
1212 | struct hptiop_hba *hba; | ||
1213 | struct hpt_iop_request_get_config iop_config; | ||
1214 | struct hpt_iop_request_set_config set_config; | ||
1215 | dma_addr_t start_phy; | ||
1216 | void *start_virt; | ||
1217 | u32 offset, i, req_size; | ||
1218 | |||
1219 | dprintk("hptiop_probe(%p)\n", pcidev); | ||
1220 | |||
1221 | if (pci_enable_device(pcidev)) { | ||
1222 | printk(KERN_ERR "hptiop: fail to enable pci device\n"); | ||
1223 | return -ENODEV; | ||
1224 | } | ||
1225 | |||
1226 | printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n", | ||
1227 | pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7, | ||
1228 | pcidev->irq); | ||
1229 | |||
1230 | pci_set_master(pcidev); | ||
1231 | |||
1232 | /* Enable 64bit DMA if possible */ | ||
1233 | if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) { | ||
1234 | if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) { | ||
1235 | printk(KERN_ERR "hptiop: fail to set dma_mask\n"); | ||
1236 | goto disable_pci_device; | ||
1237 | } | ||
1238 | } | ||
1239 | |||
1240 | if (pci_request_regions(pcidev, driver_name)) { | ||
1241 | printk(KERN_ERR "hptiop: pci_request_regions failed\n"); | ||
1242 | goto disable_pci_device; | ||
1243 | } | ||
1244 | |||
1245 | host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba)); | ||
1246 | if (!host) { | ||
1247 | printk(KERN_ERR "hptiop: fail to alloc scsi host\n"); | ||
1248 | goto free_pci_regions; | ||
1249 | } | ||
1250 | |||
1251 | hba = (struct hptiop_hba *)host->hostdata; | ||
1252 | |||
1253 | hba->pcidev = pcidev; | ||
1254 | hba->host = host; | ||
1255 | hba->initialized = 0; | ||
1256 | |||
1257 | atomic_set(&hba->resetting, 0); | ||
1258 | atomic_set(&hba->reset_count, 0); | ||
1259 | |||
1260 | init_waitqueue_head(&hba->reset_wq); | ||
1261 | init_waitqueue_head(&hba->ioctl_wq); | ||
1262 | |||
1263 | host->max_lun = 1; | ||
1264 | host->max_channel = 0; | ||
1265 | host->io_port = 0; | ||
1266 | host->n_io_port = 0; | ||
1267 | host->irq = pcidev->irq; | ||
1268 | |||
1269 | if (hptiop_map_pci_bar(hba)) | ||
1270 | goto free_scsi_host; | ||
1271 | |||
1272 | if (iop_wait_ready(hba->iop, 20000)) { | ||
1273 | printk(KERN_ERR "scsi%d: firmware not ready\n", | ||
1274 | hba->host->host_no); | ||
1275 | goto unmap_pci_bar; | ||
1276 | } | ||
1277 | |||
1278 | if (iop_get_config(hba, &iop_config)) { | ||
1279 | printk(KERN_ERR "scsi%d: get config failed\n", | ||
1280 | hba->host->host_no); | ||
1281 | goto unmap_pci_bar; | ||
1282 | } | ||
1283 | |||
1284 | hba->max_requests = min(le32_to_cpu(iop_config.max_requests), | ||
1285 | HPTIOP_MAX_REQUESTS); | ||
1286 | hba->max_devices = le32_to_cpu(iop_config.max_devices); | ||
1287 | hba->max_request_size = le32_to_cpu(iop_config.request_size); | ||
1288 | hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count); | ||
1289 | hba->firmware_version = le32_to_cpu(iop_config.firmware_version); | ||
1290 | hba->sdram_size = le32_to_cpu(iop_config.sdram_size); | ||
1291 | |||
1292 | host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9; | ||
1293 | host->max_id = le32_to_cpu(iop_config.max_devices); | ||
1294 | host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count); | ||
1295 | host->can_queue = le32_to_cpu(iop_config.max_requests); | ||
1296 | host->cmd_per_lun = le32_to_cpu(iop_config.max_requests); | ||
1297 | host->max_cmd_len = 16; | ||
1298 | |||
1299 | set_config.vbus_id = cpu_to_le32(host->host_no); | ||
1300 | set_config.iop_id = cpu_to_le32(host->host_no); | ||
1301 | |||
1302 | if (iop_set_config(hba, &set_config)) { | ||
1303 | printk(KERN_ERR "scsi%d: set config failed\n", | ||
1304 | hba->host->host_no); | ||
1305 | goto unmap_pci_bar; | ||
1306 | } | ||
1307 | |||
1308 | if (scsi_add_host(host, &pcidev->dev)) { | ||
1309 | printk(KERN_ERR "scsi%d: scsi_add_host failed\n", | ||
1310 | hba->host->host_no); | ||
1311 | goto unmap_pci_bar; | ||
1312 | } | ||
1313 | |||
1314 | pci_set_drvdata(pcidev, host); | ||
1315 | |||
1316 | if (request_irq(pcidev->irq, hptiop_intr, SA_SHIRQ, | ||
1317 | driver_name, hba)) { | ||
1318 | printk(KERN_ERR "scsi%d: request irq %d failed\n", | ||
1319 | hba->host->host_no, pcidev->irq); | ||
1320 | goto remove_scsi_host; | ||
1321 | } | ||
1322 | |||
1323 | /* Allocate request mem */ | ||
1324 | req_size = sizeof(struct hpt_iop_request_scsi_command) | ||
1325 | + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1); | ||
1326 | if ((req_size & 0x1f) != 0) | ||
1327 | req_size = (req_size + 0x1f) & ~0x1f; | ||
1328 | |||
1329 | dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests); | ||
1330 | |||
1331 | hba->req_size = req_size; | ||
1332 | start_virt = dma_alloc_coherent(&pcidev->dev, | ||
1333 | hba->req_size*hba->max_requests + 0x20, | ||
1334 | &start_phy, GFP_KERNEL); | ||
1335 | |||
1336 | if (!start_virt) { | ||
1337 | printk(KERN_ERR "scsi%d: fail to alloc request mem\n", | ||
1338 | hba->host->host_no); | ||
1339 | goto free_request_irq; | ||
1340 | } | ||
1341 | |||
1342 | hba->dma_coherent = start_virt; | ||
1343 | hba->dma_coherent_handle = start_phy; | ||
1344 | |||
1345 | if ((start_phy & 0x1f) != 0) | ||
1346 | { | ||
1347 | offset = ((start_phy + 0x1f) & ~0x1f) - start_phy; | ||
1348 | start_phy += offset; | ||
1349 | start_virt += offset; | ||
1350 | } | ||
1351 | |||
1352 | hba->req_list = start_virt; | ||
1353 | for (i = 0; i < hba->max_requests; i++) { | ||
1354 | hba->reqs[i].next = NULL; | ||
1355 | hba->reqs[i].req_virt = start_virt; | ||
1356 | hba->reqs[i].req_shifted_phy = start_phy >> 5; | ||
1357 | hba->reqs[i].index = i; | ||
1358 | free_req(hba, &hba->reqs[i]); | ||
1359 | start_virt = (char *)start_virt + hba->req_size; | ||
1360 | start_phy = start_phy + hba->req_size; | ||
1361 | } | ||
1362 | |||
1363 | /* Enable Interrupt and start background task */ | ||
1364 | if (hptiop_initialize_iop(hba)) | ||
1365 | goto free_request_mem; | ||
1366 | |||
1367 | spin_lock(&hptiop_hba_list_lock); | ||
1368 | list_add_tail(&hba->link, &hptiop_hba_list); | ||
1369 | spin_unlock(&hptiop_hba_list_lock); | ||
1370 | |||
1371 | scsi_scan_host(host); | ||
1372 | |||
1373 | dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no); | ||
1374 | return 0; | ||
1375 | |||
1376 | free_request_mem: | ||
1377 | dma_free_coherent(&hba->pcidev->dev, | ||
1378 | hba->req_size*hba->max_requests + 0x20, | ||
1379 | hba->dma_coherent, hba->dma_coherent_handle); | ||
1380 | |||
1381 | free_request_irq: | ||
1382 | free_irq(hba->pcidev->irq, hba); | ||
1383 | |||
1384 | remove_scsi_host: | ||
1385 | scsi_remove_host(host); | ||
1386 | |||
1387 | unmap_pci_bar: | ||
1388 | iounmap(hba->iop); | ||
1389 | |||
1390 | free_pci_regions: | ||
1391 | pci_release_regions(pcidev); | ||
1392 | |||
1393 | free_scsi_host: | ||
1394 | scsi_host_put(host); | ||
1395 | |||
1396 | disable_pci_device: | ||
1397 | pci_disable_device(pcidev); | ||
1398 | |||
1399 | dprintk("scsi%d: hptiop_probe fail\n", host->host_no); | ||
1400 | return -ENODEV; | ||
1401 | } | ||
1402 | |||
1403 | static void hptiop_shutdown(struct pci_dev *pcidev) | ||
1404 | { | ||
1405 | struct Scsi_Host *host = pci_get_drvdata(pcidev); | ||
1406 | struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; | ||
1407 | struct hpt_iopmu __iomem *iop = hba->iop; | ||
1408 | u32 int_mask; | ||
1409 | |||
1410 | dprintk("hptiop_shutdown(%p)\n", hba); | ||
1411 | |||
1412 | /* stop the iop */ | ||
1413 | if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) | ||
1414 | printk(KERN_ERR "scsi%d: shutdown the iop timeout\n", | ||
1415 | hba->host->host_no); | ||
1416 | |||
1417 | /* disable all outbound interrupts */ | ||
1418 | int_mask = readl(&iop->outbound_intmask); | ||
1419 | writel(int_mask | | ||
1420 | IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE, | ||
1421 | &iop->outbound_intmask); | ||
1422 | hptiop_pci_posting_flush(iop); | ||
1423 | } | ||
1424 | |||
1425 | static void hptiop_remove(struct pci_dev *pcidev) | ||
1426 | { | ||
1427 | struct Scsi_Host *host = pci_get_drvdata(pcidev); | ||
1428 | struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; | ||
1429 | |||
1430 | dprintk("scsi%d: hptiop_remove\n", hba->host->host_no); | ||
1431 | |||
1432 | spin_lock(&hptiop_hba_list_lock); | ||
1433 | list_del_init(&hba->link); | ||
1434 | spin_unlock(&hptiop_hba_list_lock); | ||
1435 | |||
1436 | /* remove the scsi host first so no new commands arrive during teardown */ | ||
1437 | scsi_remove_host(host); | ||
1438 | |||
1439 | hptiop_shutdown(pcidev); | ||
1440 | |||
1441 | free_irq(hba->pcidev->irq, hba); | ||
1439 | |||
1440 | dma_free_coherent(&hba->pcidev->dev, | ||
1441 | hba->req_size * hba->max_requests + 0x20, | ||
1442 | hba->dma_coherent, | ||
1443 | hba->dma_coherent_handle); | ||
1444 | |||
1445 | iounmap(hba->iop); | ||
1446 | |||
1447 | pci_release_regions(hba->pcidev); | ||
1448 | pci_set_drvdata(hba->pcidev, NULL); | ||
1449 | pci_disable_device(hba->pcidev); | ||
1450 | |||
1452 | scsi_host_put(host); | ||
1453 | } | ||
1454 | |||
1455 | static struct pci_device_id hptiop_id_table[] = { | ||
1456 | { PCI_DEVICE(0x1103, 0x3220) }, | ||
1457 | { PCI_DEVICE(0x1103, 0x3320) }, | ||
1458 | {}, | ||
1459 | }; | ||
1460 | |||
1461 | MODULE_DEVICE_TABLE(pci, hptiop_id_table); | ||
1462 | |||
1463 | static struct pci_driver hptiop_pci_driver = { | ||
1464 | .name = driver_name, | ||
1465 | .id_table = hptiop_id_table, | ||
1466 | .probe = hptiop_probe, | ||
1467 | .remove = hptiop_remove, | ||
1468 | .shutdown = hptiop_shutdown, | ||
1469 | }; | ||
1470 | |||
1471 | static int __init hptiop_module_init(void) | ||
1472 | { | ||
1473 | int error; | ||
1474 | |||
1475 | printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); | ||
1476 | |||
1477 | error = pci_register_driver(&hptiop_pci_driver); | ||
1478 | if (error < 0) | ||
1479 | return error; | ||
1480 | |||
1481 | hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops); | ||
1482 | if (hptiop_cdev_major < 0) { | ||
1483 | printk(KERN_WARNING "unable to register hptiop device.\n"); | ||
1484 | return hptiop_cdev_major; | ||
1485 | } | ||
1486 | |||
1487 | return 0; | ||
1488 | } | ||
1489 | |||
1490 | static void __exit hptiop_module_exit(void) | ||
1491 | { | ||
1492 | dprintk("hptiop_module_exit\n"); | ||
1493 | unregister_chrdev(hptiop_cdev_major, "hptiop"); | ||
1494 | pci_unregister_driver(&hptiop_pci_driver); | ||
1495 | } | ||
1496 | |||
1497 | |||
1498 | module_init(hptiop_module_init); | ||
1499 | module_exit(hptiop_module_exit); | ||
1500 | |||
1501 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h new file mode 100644 index 000000000000..f04f7e81d1ae --- /dev/null +++ b/drivers/scsi/hptiop.h | |||
@@ -0,0 +1,465 @@ | |||
1 | /* | ||
2 | * HighPoint RR3xxx controller driver for Linux | ||
3 | * Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; version 2 of the License. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * Please report bugs/comments/suggestions to linux@highpoint-tech.com | ||
15 | * | ||
16 | * For more information, visit http://www.highpoint-tech.com | ||
17 | */ | ||
18 | #ifndef _HPTIOP_H_ | ||
19 | #define _HPTIOP_H_ | ||
20 | |||
21 | /* | ||
22 | * logical device type. | ||
23 | * Identify array (logical device) and physical device. | ||
24 | */ | ||
25 | #define LDT_ARRAY 1 | ||
26 | #define LDT_DEVICE 2 | ||
27 | |||
28 | /* | ||
29 | * Array types | ||
30 | */ | ||
31 | #define AT_UNKNOWN 0 | ||
32 | #define AT_RAID0 1 | ||
33 | #define AT_RAID1 2 | ||
34 | #define AT_RAID5 3 | ||
35 | #define AT_RAID6 4 | ||
36 | #define AT_JBOD 7 | ||
37 | |||
38 | #define MAX_NAME_LENGTH 36 | ||
39 | #define MAX_ARRAYNAME_LEN 16 | ||
40 | |||
41 | #define MAX_ARRAY_MEMBERS_V1 8 | ||
42 | #define MAX_ARRAY_MEMBERS_V2 16 | ||
43 | |||
44 | /* keep definition for source code compatibility */ | ||
45 | #define MAX_ARRAY_MEMBERS MAX_ARRAY_MEMBERS_V1 | ||
46 | |||
47 | /* | ||
48 | * array flags | ||
49 | */ | ||
50 | #define ARRAY_FLAG_DISABLED 0x00000001 /* The array is disabled */ | ||
51 | #define ARRAY_FLAG_NEEDBUILDING 0x00000002 /* need to be rebuilt */ | ||
52 | #define ARRAY_FLAG_REBUILDING 0x00000004 /* in rebuilding process */ | ||
53 | #define ARRAY_FLAG_BROKEN 0x00000008 /* broken but still working */ | ||
54 | #define ARRAY_FLAG_BOOTDISK 0x00000010 /* has an active partition */ | ||
55 | #define ARRAY_FLAG_BOOTMARK 0x00000040 /* array has boot mark set */ | ||
56 | #define ARRAY_FLAG_NEED_AUTOREBUILD 0x00000080 /* auto-rebuild should start */ | ||
57 | #define ARRAY_FLAG_VERIFYING 0x00000100 /* is being verified */ | ||
58 | #define ARRAY_FLAG_INITIALIZING 0x00000200 /* is being initialized */ | ||
59 | #define ARRAY_FLAG_TRANSFORMING 0x00000400 /* transform in progress */ | ||
60 | #define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array needs transform */ | ||
61 | #define ARRAY_FLAG_NEEDINITIALIZING 0x00001000 /* initialization not done */ | ||
62 | #define ARRAY_FLAG_BROKEN_REDUNDANT 0x00002000 /* broken but redundant */ | ||
63 | |||
64 | /* | ||
65 | * device flags | ||
66 | */ | ||
67 | #define DEVICE_FLAG_DISABLED 0x00000001 /* device is disabled */ | ||
68 | #define DEVICE_FLAG_UNINITIALIZED 0x00010000 /* device is not initialized */ | ||
69 | #define DEVICE_FLAG_LEGACY 0x00020000 /* legacy drive */ | ||
70 | #define DEVICE_FLAG_IS_SPARE 0x80000000 /* is a spare disk */ | ||
71 | |||
72 | /* | ||
73 | * ioctl codes | ||
74 | */ | ||
75 | #define HPT_CTL_CODE(x) ((x)+0xFF00) | ||
76 | #define HPT_CTL_CODE_LINUX_TO_IOP(x) ((x)-0xff00) | ||
77 | |||
78 | #define HPT_IOCTL_GET_CONTROLLER_INFO HPT_CTL_CODE(2) | ||
79 | #define HPT_IOCTL_GET_CHANNEL_INFO HPT_CTL_CODE(3) | ||
80 | #define HPT_IOCTL_GET_LOGICAL_DEVICES HPT_CTL_CODE(4) | ||
81 | #define HPT_IOCTL_GET_DRIVER_CAPABILITIES HPT_CTL_CODE(19) | ||
82 | #define HPT_IOCTL_GET_DEVICE_INFO_V3 HPT_CTL_CODE(46) | ||
83 | #define HPT_IOCTL_GET_CONTROLLER_INFO_V2 HPT_CTL_CODE(47) | ||
84 | |||
85 | /* | ||
86 | * Controller information. | ||
87 | */ | ||
88 | struct hpt_controller_info { | ||
89 | u8 chip_type; /* chip type */ | ||
90 | u8 interrupt_level; /* IRQ level */ | ||
91 | u8 num_buses; /* bus count */ | ||
92 | u8 chip_flags; | ||
93 | |||
94 | u8 product_id[MAX_NAME_LENGTH];/* product name */ | ||
95 | u8 vendor_id[MAX_NAME_LENGTH]; /* vendor name */ | ||
96 | } | ||
97 | __attribute__((packed)); | ||
98 | |||
99 | /* | ||
100 | * Channel information. | ||
101 | */ | ||
102 | struct hpt_channel_info { | ||
103 | __le32 io_port; /* IDE Base Port Address */ | ||
104 | __le32 control_port; /* IDE Control Port Address */ | ||
105 | __le32 devices[2]; /* devices connected to this channel */ | ||
106 | } | ||
107 | __attribute__((packed)); | ||
108 | |||
109 | /* | ||
110 | * Array information. | ||
111 | */ | ||
112 | struct hpt_array_info_v3 { | ||
113 | u8 name[MAX_ARRAYNAME_LEN]; /* array name */ | ||
114 | u8 description[64]; /* array description */ | ||
115 | u8 create_manager[16]; /* who created it */ | ||
116 | __le32 create_time; /* when it was created */ | ||
117 | |||
118 | u8 array_type; /* array type */ | ||
119 | u8 block_size_shift; /* stripe size */ | ||
120 | u8 ndisk; /* number of IDs in members[] */ | ||
121 | u8 reserved; | ||
122 | |||
123 | __le32 flags; /* working flags, see ARRAY_FLAG_XXX */ | ||
124 | __le32 members[MAX_ARRAY_MEMBERS_V2]; /* member array/disks */ | ||
125 | |||
126 | __le32 rebuilding_progress; | ||
127 | __le64 rebuilt_sectors; /* rebuilding point (LBA) for single member */ | ||
128 | |||
129 | __le32 transform_source; | ||
130 | __le32 transform_target; /* destination device ID */ | ||
131 | __le32 transforming_progress; | ||
132 | __le32 signature; /* persistent identification */ | ||
133 | __le16 critical_members; /* bit mask of critical members */ | ||
134 | __le16 reserve2; | ||
135 | __le32 reserve; | ||
136 | } | ||
137 | __attribute__((packed)); | ||
138 | |||
139 | /* | ||
140 | * physical device information. | ||
141 | */ | ||
142 | #define MAX_PARENTS_PER_DISK 8 | ||
143 | |||
144 | struct hpt_device_info_v2 { | ||
145 | u8 ctlr_id; /* controller id */ | ||
146 | u8 path_id; /* bus */ | ||
147 | u8 target_id; /* id */ | ||
148 | u8 device_mode_setting; /* Current Data Transfer mode: 0-4 PIO0-4 */ | ||
149 | /* 5-7 MW DMA0-2, 8-13 UDMA0-5 */ | ||
150 | u8 device_type; /* device type */ | ||
151 | u8 usable_mode; /* highest usable mode */ | ||
152 | |||
153 | #ifdef __BIG_ENDIAN_BITFIELD | ||
154 | u8 NCQ_enabled: 1; | ||
155 | u8 NCQ_supported: 1; | ||
156 | u8 TCQ_enabled: 1; | ||
157 | u8 TCQ_supported: 1; | ||
158 | u8 write_cache_enabled: 1; | ||
159 | u8 write_cache_supported: 1; | ||
160 | u8 read_ahead_enabled: 1; | ||
161 | u8 read_ahead_supported: 1; | ||
162 | u8 reserved6: 6; | ||
163 | u8 spin_up_mode: 2; | ||
164 | #else | ||
165 | u8 read_ahead_supported: 1; | ||
166 | u8 read_ahead_enabled: 1; | ||
167 | u8 write_cache_supported: 1; | ||
168 | u8 write_cache_enabled: 1; | ||
169 | u8 TCQ_supported: 1; | ||
170 | u8 TCQ_enabled: 1; | ||
171 | u8 NCQ_supported: 1; | ||
172 | u8 NCQ_enabled: 1; | ||
173 | u8 spin_up_mode: 2; | ||
174 | u8 reserved6: 6; | ||
175 | #endif | ||
176 | |||
177 | __le32 flags; /* working flags, see DEVICE_FLAG_XXX */ | ||
178 | u8 ident[150]; /* (partial) Identify Data of this device */ | ||
179 | |||
180 | __le64 total_free; | ||
181 | __le64 max_free; | ||
182 | __le64 bad_sectors; | ||
183 | __le32 parent_arrays[MAX_PARENTS_PER_DISK]; | ||
184 | } | ||
185 | __attribute__((packed)); | ||
186 | |||
187 | /* | ||
188 | * Logical device information. | ||
189 | */ | ||
190 | #define INVALID_TARGET_ID 0xFF | ||
191 | #define INVALID_BUS_ID 0xFF | ||
192 | |||
193 | struct hpt_logical_device_info_v3 { | ||
194 | u8 type; /* LDT_ARRAY or LDT_DEVICE */ | ||
195 | u8 cache_policy; /* refer to CACHE_POLICY_xxx */ | ||
196 | u8 vbus_id; /* vbus sequence in vbus_list */ | ||
197 | u8 target_id; /* OS target id. 0xFF is invalid */ | ||
198 | /* OS name: DISK $VBusId_$TargetId */ | ||
199 | __le64 capacity; /* array capacity */ | ||
200 | __le32 parent_array; /* don't use this field for physical | ||
201 | device. use ParentArrays field in | ||
202 | hpt_device_info_v2 */ | ||
203 | /* reserved statistic fields */ | ||
204 | __le32 stat1; | ||
205 | __le32 stat2; | ||
206 | __le32 stat3; | ||
207 | __le32 stat4; | ||
208 | |||
209 | union { | ||
210 | struct hpt_array_info_v3 array; | ||
211 | struct hpt_device_info_v2 device; | ||
212 | } __attribute__((packed)) u; | ||
213 | |||
214 | } | ||
215 | __attribute__((packed)); | ||
216 | |||
217 | /* | ||
218 | * ioctl structure | ||
219 | */ | ||
220 | #define HPT_IOCTL_MAGIC 0xA1B2C3D4 | ||
221 | |||
222 | struct hpt_ioctl_u { | ||
223 | u32 magic; /* used to check if it's a valid ioctl packet */ | ||
224 | u32 ioctl_code; /* operation control code */ | ||
225 | void __user *inbuf; /* input data buffer */ | ||
226 | u32 inbuf_size; /* size of input data buffer */ | ||
227 | void __user *outbuf; /* output data buffer */ | ||
228 | u32 outbuf_size; /* size of output data buffer */ | ||
229 | void __user *bytes_returned; /* count of bytes returned */ | ||
230 | } | ||
231 | __attribute__((packed)); | ||
232 | |||
233 | |||
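/*
 * IOP register block mapped at PCI BAR0; the field layout follows the
 * controller register map in Documentation/scsi/hptiop.txt.
 */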
234 | struct hpt_iopmu { | ||
236 | __le32 reserved0[4]; | ||
237 | __le32 inbound_msgaddr0; | ||
238 | __le32 inbound_msgaddr1; | ||
239 | __le32 outbound_msgaddr0; | ||
240 | __le32 outbound_msgaddr1; | ||
241 | __le32 inbound_doorbell; | ||
242 | __le32 inbound_intstatus; | ||
243 | __le32 inbound_intmask; | ||
244 | __le32 outbound_doorbell; | ||
245 | __le32 outbound_intstatus; | ||
246 | __le32 outbound_intmask; | ||
247 | __le32 reserved1[2]; | ||
248 | __le32 inbound_queue; | ||
249 | __le32 outbound_queue; | ||
250 | }; | ||
251 | |||
252 | #define IOPMU_QUEUE_EMPTY 0xffffffff | ||
253 | #define IOPMU_QUEUE_MASK_HOST_BITS 0xf0000000 | ||
254 | #define IOPMU_QUEUE_ADDR_HOST_BIT 0x80000000 | ||
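Going the other way, a minimal decode sketch, assuming a 32-bit bus address (hpt_tag_to_bus_addr is a hypothetical name):

static inline u32 hpt_tag_to_bus_addr(u32 tag)
{
	/* host-request tags carry bus_addr >> 5 below the host bits;
	   tags without the host bit are offsets into IOP BAR0 */
	return (tag & ~IOPMU_QUEUE_MASK_HOST_BITS) << 5;
}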
255 | |||
256 | #define IOPMU_OUTBOUND_INT_MSG0 1 | ||
257 | #define IOPMU_OUTBOUND_INT_MSG1 2 | ||
258 | #define IOPMU_OUTBOUND_INT_DOORBELL 4 | ||
259 | #define IOPMU_OUTBOUND_INT_POSTQUEUE 8 | ||
260 | #define IOPMU_OUTBOUND_INT_PCI 0x10 | ||
261 | |||
262 | #define IOPMU_INBOUND_INT_MSG0 1 | ||
263 | #define IOPMU_INBOUND_INT_MSG1 2 | ||
264 | #define IOPMU_INBOUND_INT_DOORBELL 4 | ||
265 | #define IOPMU_INBOUND_INT_ERROR 8 | ||
266 | #define IOPMU_INBOUND_INT_POSTQUEUE 0x10 | ||
267 | |||
268 | enum hpt_iopmu_message { | ||
269 | /* host-to-iop messages */ | ||
270 | IOPMU_INBOUND_MSG0_NOP = 0, | ||
271 | IOPMU_INBOUND_MSG0_RESET, | ||
272 | IOPMU_INBOUND_MSG0_FLUSH, | ||
273 | IOPMU_INBOUND_MSG0_SHUTDOWN, | ||
274 | IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, | ||
275 | IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, | ||
276 | IOPMU_INBOUND_MSG0_MAX = 0xff, | ||
277 | /* iop-to-host messages */ | ||
278 | IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100, | ||
279 | IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_MAX = 0x1ff, | ||
280 | IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_0 = 0x200, | ||
281 | IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_MAX = 0x2ff, | ||
282 | IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_0 = 0x300, | ||
283 | IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff, | ||
284 | }; | ||
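A sketch of how a host-to-IOP message is posted; the driver's real synchronous path is iop_send_sync_msg() in hptiop.c:

static void hpt_post_msg0(struct hpt_iopmu __iomem *iop, u32 msg)
{
	/* non-queued messages go through inbound message register 0;
	   the IOP echoes the value back via outbound message register 0,
	   raising IOPMU_OUTBOUND_INT_MSG0, when it has completed */
	writel(msg, &iop->inbound_msgaddr0);
}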
285 | |||
286 | struct hpt_iop_request_header { | ||
288 | __le32 size; | ||
289 | __le32 type; | ||
290 | __le32 flags; | ||
291 | __le32 result; | ||
292 | __le32 context; /* host context */ | ||
293 | __le32 context_hi32; | ||
294 | }; | ||
295 | |||
296 | #define IOP_REQUEST_FLAG_SYNC_REQUEST 1 | ||
297 | #define IOP_REQUEST_FLAG_BIST_REQUEST 2 | ||
298 | #define IOP_REQUEST_FLAG_REMAPPED 4 | ||
299 | #define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8 | ||
300 | |||
301 | enum hpt_iop_request_type { | ||
302 | IOP_REQUEST_TYPE_GET_CONFIG = 0, | ||
303 | IOP_REQUEST_TYPE_SET_CONFIG, | ||
304 | IOP_REQUEST_TYPE_BLOCK_COMMAND, | ||
305 | IOP_REQUEST_TYPE_SCSI_COMMAND, | ||
306 | IOP_REQUEST_TYPE_IOCTL_COMMAND, | ||
307 | IOP_REQUEST_TYPE_MAX | ||
308 | }; | ||
309 | |||
310 | enum hpt_iop_result_type { | ||
311 | IOP_RESULT_PENDING = 0, | ||
312 | IOP_RESULT_SUCCESS, | ||
313 | IOP_RESULT_FAIL, | ||
314 | IOP_RESULT_BUSY, | ||
315 | IOP_RESULT_RESET, | ||
316 | IOP_RESULT_INVALID_REQUEST, | ||
317 | IOP_RESULT_BAD_TARGET, | ||
318 | IOP_RESULT_MODE_SENSE_CHECK_CONDITION, | ||
319 | }; | ||
320 | |||
321 | struct hpt_iop_request_get_config { | ||
323 | struct hpt_iop_request_header header; | ||
324 | __le32 interface_version; | ||
325 | __le32 firmware_version; | ||
326 | __le32 max_requests; | ||
327 | __le32 request_size; | ||
328 | __le32 max_sg_count; | ||
329 | __le32 data_transfer_length; | ||
330 | __le32 alignment_mask; | ||
331 | __le32 max_devices; | ||
332 | __le32 sdram_size; | ||
333 | }; | ||
334 | |||
335 | struct hpt_iop_request_set_config { | ||
337 | struct hpt_iop_request_header header; | ||
338 | __le32 iop_id; | ||
339 | __le32 vbus_id; | ||
340 | __le32 reserve[6]; | ||
341 | }; | ||
342 | |||
343 | struct hpt_iopsg { | ||
345 | __le32 size; | ||
346 | __le32 eot; /* non-zero: end of table */ | ||
347 | __le64 pci_address; | ||
348 | }; | ||
349 | |||
350 | struct hpt_iop_request_block_command { | ||
352 | struct hpt_iop_request_header header; | ||
353 | u8 channel; | ||
354 | u8 target; | ||
355 | u8 lun; | ||
356 | u8 pad1; | ||
357 | __le16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */ | ||
358 | __le16 sectors; | ||
359 | __le64 lba; | ||
360 | struct hpt_iopsg sg_list[1]; | ||
361 | }; | ||
362 | |||
363 | #define IOP_BLOCK_COMMAND_READ 1 | ||
364 | #define IOP_BLOCK_COMMAND_WRITE 2 | ||
365 | #define IOP_BLOCK_COMMAND_VERIFY 3 | ||
366 | #define IOP_BLOCK_COMMAND_FLUSH 4 | ||
367 | #define IOP_BLOCK_COMMAND_SHUTDOWN 5 | ||
368 | |||
369 | struct hpt_iop_request_scsi_command { | ||
371 | struct hpt_iop_request_header header; | ||
372 | u8 channel; | ||
373 | u8 target; | ||
374 | u8 lun; | ||
375 | u8 pad1; | ||
376 | u8 cdb[16]; | ||
377 | __le32 dataxfer_length; | ||
378 | struct hpt_iopsg sg_list[1]; | ||
379 | }; | ||
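A sizing sketch mirroring the computation in hptiop_probe (hpt_scsi_req_size is a hypothetical name):

static inline u32 hpt_scsi_req_size(u32 sg_count)
{
	/* sg_list[1] embeds one descriptor, so sg_count entries add
	   sg_count - 1 elements; round up to the 32-byte alignment
	   the inbound queue encoding requires */
	u32 size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (sg_count - 1);

	return (size + 0x1f) & ~0x1f;
}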
380 | |||
381 | struct hpt_iop_request_ioctl_command { | ||
383 | struct hpt_iop_request_header header; | ||
384 | __le32 ioctl_code; | ||
385 | __le32 inbuf_size; | ||
386 | __le32 outbuf_size; | ||
387 | __le32 bytes_returned; | ||
388 | u8 buf[1]; | ||
389 | /* out data should be put at buf[(inbuf_size+3)&~3] */ | ||
390 | }; | ||
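The buffer layout described in the comment above, as a sketch only (hpt_ioctl_outbuf is a hypothetical name):

static inline u8 *hpt_ioctl_outbuf(struct hpt_iop_request_ioctl_command *req)
{
	/* output data starts at the first 4-byte-aligned offset past
	   the input data */
	return &req->buf[(le32_to_cpu(req->inbuf_size) + 3) & ~3];
}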
391 | |||
392 | #define HPTIOP_MAX_REQUESTS 256u | ||
393 | |||
394 | struct hptiop_request { | ||
395 | struct hptiop_request *next; | ||
396 | void *req_virt; | ||
397 | u32 req_shifted_phy; | ||
398 | struct scsi_cmnd *scp; | ||
399 | int index; | ||
400 | }; | ||
401 | |||
402 | struct hpt_scsi_pointer { | ||
403 | int mapped; | ||
404 | int sgcnt; | ||
405 | dma_addr_t dma_handle; | ||
406 | }; | ||
407 | |||
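/* driver-private per-command state is overlaid on the scsi_cmnd SCp field */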
408 | #define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp) | ||
409 | |||
410 | struct hptiop_hba { | ||
411 | struct hpt_iopmu __iomem *iop; | ||
412 | struct Scsi_Host *host; | ||
413 | struct pci_dev *pcidev; | ||
414 | |||
415 | struct list_head link; | ||
416 | |||
417 | /* IOP config info */ | ||
418 | u32 firmware_version; | ||
419 | u32 sdram_size; | ||
420 | u32 max_devices; | ||
421 | u32 max_requests; | ||
422 | u32 max_request_size; | ||
423 | u32 max_sg_descriptors; | ||
424 | |||
425 | u32 req_size; /* host-allocated request buffer size */ | ||
426 | int initialized; | ||
427 | int msg_done; | ||
428 | |||
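/* free requests form a singly-linked list threaded through next */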
429 | struct hptiop_request *req_list; | ||
430 | struct hptiop_request reqs[HPTIOP_MAX_REQUESTS]; | ||
431 | |||
432 | /* used to free allocated dma area */ | ||
433 | void *dma_coherent; | ||
434 | dma_addr_t dma_coherent_handle; | ||
435 | |||
436 | atomic_t reset_count; | ||
437 | atomic_t resetting; | ||
438 | |||
439 | wait_queue_head_t reset_wq; | ||
440 | wait_queue_head_t ioctl_wq; | ||
441 | }; | ||
442 | |||
443 | struct hpt_ioctl_k { | ||
445 | struct hptiop_hba *hba; | ||
446 | u32 ioctl_code; | ||
447 | u32 inbuf_size; | ||
448 | u32 outbuf_size; | ||
449 | void *inbuf; | ||
450 | void *outbuf; | ||
451 | u32 *bytes_returned; | ||
452 | void (*done)(struct hpt_ioctl_k *); | ||
453 | int result; /* HPT_IOCTL_RESULT_ */ | ||
454 | }; | ||
455 | |||
456 | #define HPT_IOCTL_RESULT_OK 0 | ||
457 | #define HPT_IOCTL_RESULT_FAILED (-1) | ||
458 | |||
459 | #if 0 | ||
460 | #define dprintk(fmt, args...) do { printk(fmt, ##args); } while (0) | ||
461 | #else | ||
462 | #define dprintk(fmt, args...) | ||
463 | #endif | ||
464 | |||
465 | #endif | ||