commit     00f5970193e22c48f399a2430635d6416b51befe
tree       ccab0b0ba3e9c093e5edf16f2d23e9f979d2b6d4
parent     a7ed0448e28ce6154390bf690b8b5c37853732dc
author     HighPoint Linux Team <linux@highpoint-tech.com>  2007-12-13 19:14:26 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2008-01-11 19:28:06 -0500
[SCSI] hptiop: add more adapter models and other fixes
Most code changes were made to support adapters based on the Marvell IOP,
plus some other fixes:

 - add more PCI device IDs
 - support for adapters based on the Marvell IOP
 - fix a result code translation error on big-endian systems
 - fix a resource releasing bug when scsi_host_alloc() fails in
   hptiop_probe()
 - update scsi_cmnd.resid when finishing a request
 - correct some coding style issues

[akpm@linux-foundation.org: type fixes]
Signed-off-by: HighPoint Linux Team <linux@highpoint-tech.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
 Documentation/scsi/hptiop.txt |  30
 drivers/scsi/Kconfig          |   4
 drivers/scsi/hptiop.c         | 593
 drivers/scsi/hptiop.h         | 124
 4 files changed, 576 insertions(+), 175 deletions(-)
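Most of the code movement below serves one structural change: register access
that was open-coded against the Intel IOP ("itl") is routed through a
per-adapter-family ops table so the Marvell IOP ("mv") path can plug in
beside it, selected per PCI ID via pci_device_id.driver_data in
hptiop_probe(). A minimal sketch of the pattern, with types simplified for
illustration (the real table is struct hptiop_adapter_ops at the end of the
diff):

    /* Sketch only -- simplified from the patch, not driver source. */
    struct hba;

    struct adapter_ops {
        int  (*iop_wait_ready)(struct hba *hba, unsigned int millisec);
        void (*post_msg)(struct hba *hba, unsigned int msg);
        /* ...map_pci_bar, get_config, iop_intr, post_req, ... */
    };

    struct hba {
        const struct adapter_ops *ops;  /* picked at probe time */
    };

    /* Callers stay family-agnostic: */
    static void reset_hba(struct hba *hba)
    {
        hba->ops->post_msg(hba, 0 /* e.g. IOPMU_INBOUND_MSG0_RESET */);
    }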
diff --git a/Documentation/scsi/hptiop.txt b/Documentation/scsi/hptiop.txt
index d28a31247d4c..a6eb4add1be6 100644
--- a/Documentation/scsi/hptiop.txt
+++ b/Documentation/scsi/hptiop.txt
@@ -1,9 +1,9 @@
-HIGHPOINT ROCKETRAID 3xxx RAID DRIVER (hptiop)
+HIGHPOINT ROCKETRAID 3xxx/4xxx ADAPTER DRIVER (hptiop)
 
 Controller Register Map
 -------------------------
 
-The controller IOP is accessed via PCI BAR0.
+For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
 
      BAR0 offset    Register
             0x10    Inbound Message Register 0
@@ -18,6 +18,24 @@ The controller IOP is accessed via PCI BAR0.
             0x40    Inbound Queue Port
             0x44    Outbound Queue Port
 
+For Marvell IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
+
+     BAR0 offset    Register
+         0x20400    Inbound Doorbell Register
+         0x20404    Inbound Interrupt Mask Register
+         0x20408    Outbound Doorbell Register
+         0x2040C    Outbound Interrupt Mask Register
+
+     BAR1 offset    Register
+             0x0    Inbound Queue Head Pointer
+             0x4    Inbound Queue Tail Pointer
+             0x8    Outbound Queue Head Pointer
+             0xC    Outbound Queue Tail Pointer
+            0x10    Inbound Message Register
+            0x14    Outbound Message Register
+     0x40-0x1040    Inbound Queue
+   0x1040-0x2040    Outbound Queue
+
 
 I/O Request Workflow
 ----------------------
@@ -73,15 +91,9 @@ The driver exposes following sysfs attributes:
   driver-version        R     driver version string
   firmware-version      R     firmware version string
 
-The driver registers char device "hptiop" to communicate with HighPoint RAID
-management software. Its ioctl routine acts as a general binary interface
-between the IOP firmware and HighPoint RAID management software. New management
-functions can be implemented in application/firmware without modification
-in driver code.
-
 
 -----------------------------------------------------------------------------
-Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
+Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
 
     This file is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
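The BAR1 head/tail registers documented above describe a simple ring
protocol, which the new mv_outbound_read()/mv_inbound_write() helpers in the
driver diff below implement over MMIO. An illustrative sketch of the same
logic in plain C (assumed simplifications: ordinary memory instead of
readl/writel and memcpy_toio, and no doorbell write):

    #include <stdint.h>

    #define QUEUE_LEN 512                  /* mirrors MVIOP_QUEUE_LEN */

    struct mv_queues {
        uint32_t inbound_head, inbound_tail;
        uint32_t outbound_head, outbound_tail;
        uint64_t inbound_q[QUEUE_LEN];
        uint64_t outbound_q[QUEUE_LEN];
    };

    /* Host posts a request: write the slot, then advance the head. */
    static void inbound_post(struct mv_queues *q, uint64_t req)
    {
        uint32_t head = q->inbound_head;

        q->inbound_q[head] = req;
        q->inbound_head = (head + 1) % QUEUE_LEN;
    }

    /* Host consumes a completion: queue is empty when tail meets head. */
    static int outbound_consume(struct mv_queues *q, uint64_t *req)
    {
        uint32_t tail = q->outbound_tail;

        if (tail == q->outbound_head)
            return 0;
        *req = q->outbound_q[tail];
        q->outbound_tail = (tail + 1) % QUEUE_LEN;
        return 1;
    }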
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index becbb09d4c9b..e397599d54e3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -573,10 +573,10 @@ config SCSI_ARCMSR_AER
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 
 config SCSI_HPTIOP
-	tristate "HighPoint RocketRAID 3xxx Controller support"
+	tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
 	depends on SCSI && PCI
 	help
-	  This option enables support for HighPoint RocketRAID 3xxx
+	  This option enables support for HighPoint RocketRAID 3xxx/4xxx
 	  controllers.
 
 	  To compile this driver as a module, choose M here; the module
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 0844331abb87..df1a76438e29 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,5 +1,5 @@
 /*
- * HighPoint RR3xxx controller driver for Linux
+ * HighPoint RR3xxx/4xxx controller driver for Linux
  * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -38,80 +38,84 @@
 #include "hptiop.h"
 
 MODULE_AUTHOR("HighPoint Technologies, Inc.");
-MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");
+MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
 
 static char driver_name[] = "hptiop";
-static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
-static const char driver_ver[] = "v1.2 (070830)";
+static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
+static const char driver_ver[] = "v1.3 (071203)";
 
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
-static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+				struct hpt_iop_request_scsi_command *req);
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
 
-static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
-{
-	readl(&iop->outbound_intstatus);
-}
-
-static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
+static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
 {
 	u32 req = 0;
 	int i;
 
 	for (i = 0; i < millisec; i++) {
-		req = readl(&iop->inbound_queue);
+		req = readl(&hba->u.itl.iop->inbound_queue);
 		if (req != IOPMU_QUEUE_EMPTY)
 			break;
 		msleep(1);
 	}
 
 	if (req != IOPMU_QUEUE_EMPTY) {
-		writel(req, &iop->outbound_queue);
-		hptiop_pci_posting_flush(iop);
+		writel(req, &hba->u.itl.iop->outbound_queue);
+		readl(&hba->u.itl.iop->outbound_intstatus);
 		return 0;
 	}
 
 	return -1;
 }
 
-static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
+static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
+{
+	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
+static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
 	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
-		return hptiop_host_request_callback(hba,
+		hptiop_host_request_callback_itl(hba,
 				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
 	else
-		return hptiop_iop_request_callback(hba, tag);
+		hptiop_iop_request_callback_itl(hba, tag);
 }
 
-static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
+static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
 {
 	u32 req;
 
-	while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {
+	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
+			IOPMU_QUEUE_EMPTY) {
 
 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
-			hptiop_request_callback(hba, req);
+			hptiop_request_callback_itl(hba, req);
 		else {
 			struct hpt_iop_request_header __iomem * p;
 
 			p = (struct hpt_iop_request_header __iomem *)
-				((char __iomem *)hba->iop + req);
+				((char __iomem *)hba->u.itl.iop + req);
 
 			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
 				if (readl(&p->context))
-					hptiop_request_callback(hba, req);
+					hptiop_request_callback_itl(hba, req);
 				else
 					writel(1, &p->context);
 			}
 			else
-				hptiop_request_callback(hba, req);
+				hptiop_request_callback_itl(hba, req);
 		}
 	}
 }
 
-static int __iop_intr(struct hptiop_hba *hba)
+static int iop_intr_itl(struct hptiop_hba *hba)
 {
-	struct hpt_iopmu __iomem *iop = hba->iop;
+	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
 	u32 status;
 	int ret = 0;
 
@@ -119,6 +123,7 @@ static int __iop_intr(struct hptiop_hba *hba)
 
 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
 		u32 msg = readl(&iop->outbound_msgaddr0);
+
 		dprintk("received outbound msg %x\n", msg);
 		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
 		hptiop_message_callback(hba, msg);
@@ -126,31 +131,115 @@ static int __iop_intr(struct hptiop_hba *hba)
 	}
 
 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
-		hptiop_drain_outbound_queue(hba);
+		hptiop_drain_outbound_queue_itl(hba);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
+{
+	u32 outbound_tail = readl(&mu->outbound_tail);
+	u32 outbound_head = readl(&mu->outbound_head);
+
+	if (outbound_tail != outbound_head) {
+		u64 p;
+
+		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
+		outbound_tail++;
+
+		if (outbound_tail == MVIOP_QUEUE_LEN)
+			outbound_tail = 0;
+		writel(outbound_tail, &mu->outbound_tail);
+		return p;
+	} else
+		return 0;
+}
+
+static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
+{
+	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
+	u32 head = inbound_head + 1;
+
+	if (head == MVIOP_QUEUE_LEN)
+		head = 0;
+
+	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
+	writel(head, &hba->u.mv.mu->inbound_head);
+	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
+		&hba->u.mv.regs->inbound_doorbell);
+}
+
+static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
+{
+	u32 req_type = (tag >> 5) & 0x7;
+	struct hpt_iop_request_scsi_command *req;
+
+	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);
+
+	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);
+
+	switch (req_type) {
+	case IOP_REQUEST_TYPE_GET_CONFIG:
+	case IOP_REQUEST_TYPE_SET_CONFIG:
+		hba->msg_done = 1;
+		break;
+
+	case IOP_REQUEST_TYPE_SCSI_COMMAND:
+		req = hba->reqs[tag >> 8].req_virt;
+		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
+			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+
+		hptiop_finish_scsi_req(hba, tag>>8, req);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int iop_intr_mv(struct hptiop_hba *hba)
+{
+	u32 status;
+	int ret = 0;
+
+	status = readl(&hba->u.mv.regs->outbound_doorbell);
+	writel(~status, &hba->u.mv.regs->outbound_doorbell);
+
+	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
+		u32 msg;
+		msg = readl(&hba->u.mv.mu->outbound_msg);
+		dprintk("received outbound msg %x\n", msg);
+		hptiop_message_callback(hba, msg);
+		ret = 1;
+	}
+
+	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
+		u64 tag;
+
+		while ((tag = mv_outbound_read(hba->u.mv.mu)))
+			hptiop_request_callback_mv(hba, tag);
 		ret = 1;
 	}
 
 	return ret;
 }
 
-static int iop_send_sync_request(struct hptiop_hba *hba,
+static int iop_send_sync_request_itl(struct hptiop_hba *hba,
 				void __iomem *_req, u32 millisec)
 {
 	struct hpt_iop_request_header __iomem *req = _req;
 	u32 i;
 
-	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
-		&req->flags);
-
+	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
 	writel(0, &req->context);
-
-	writel((unsigned long)req - (unsigned long)hba->iop,
-		&hba->iop->inbound_queue);
-
-	hptiop_pci_posting_flush(hba->iop);
+	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
+		&hba->u.itl.iop->inbound_queue);
+	readl(&hba->u.itl.iop->outbound_intstatus);
 
 	for (i = 0; i < millisec; i++) {
-		__iop_intr(hba);
+		iop_intr_itl(hba);
 		if (readl(&req->context))
 			return 0;
 		msleep(1);
@@ -159,19 +248,49 @@ static int iop_send_sync_request(struct hptiop_hba *hba,
 	return -1;
 }
 
-static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+static int iop_send_sync_request_mv(struct hptiop_hba *hba,
+					u32 size_bits, u32 millisec)
 {
+	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
 	u32 i;
 
 	hba->msg_done = 0;
+	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+	mv_inbound_write(hba->u.mv.internal_req_phy |
+			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
+
+	for (i = 0; i < millisec; i++) {
+		iop_intr_mv(hba);
+		if (hba->msg_done)
+			return 0;
+		msleep(1);
+	}
+	return -1;
+}
+
+static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
+{
+	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
+	readl(&hba->u.itl.iop->outbound_intstatus);
+}
+
+static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
+{
+	writel(msg, &hba->u.mv.mu->inbound_msg);
+	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
+	readl(&hba->u.mv.regs->inbound_doorbell);
+}
 
-	writel(msg, &hba->iop->inbound_msgaddr0);
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+{
+	u32 i;
 
-	hptiop_pci_posting_flush(hba->iop);
+	hba->msg_done = 0;
+	hba->ops->post_msg(hba, msg);
 
 	for (i = 0; i < millisec; i++) {
 		spin_lock_irq(hba->host->host_lock);
-		__iop_intr(hba);
+		hba->ops->iop_intr(hba);
 		spin_unlock_irq(hba->host->host_lock);
 		if (hba->msg_done)
 			break;
@@ -181,46 +300,67 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
 	return hba->msg_done? 0 : -1;
 }
 
-static int iop_get_config(struct hptiop_hba *hba,
+static int iop_get_config_itl(struct hptiop_hba *hba,
 				struct hpt_iop_request_get_config *config)
 {
 	u32 req32;
 	struct hpt_iop_request_get_config __iomem *req;
 
-	req32 = readl(&hba->iop->inbound_queue);
+	req32 = readl(&hba->u.itl.iop->inbound_queue);
 	if (req32 == IOPMU_QUEUE_EMPTY)
 		return -1;
 
 	req = (struct hpt_iop_request_get_config __iomem *)
-			((unsigned long)hba->iop + req32);
+			((unsigned long)hba->u.itl.iop + req32);
 
 	writel(0, &req->header.flags);
 	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
 	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
 	writel(IOP_RESULT_PENDING, &req->header.result);
 
-	if (iop_send_sync_request(hba, req, 20000)) {
+	if (iop_send_sync_request_itl(hba, req, 20000)) {
 		dprintk("Get config send cmd failed\n");
 		return -1;
 	}
 
 	memcpy_fromio(config, req, sizeof(*config));
-	writel(req32, &hba->iop->outbound_queue);
+	writel(req32, &hba->u.itl.iop->outbound_queue);
+	return 0;
+}
+
+static int iop_get_config_mv(struct hptiop_hba *hba,
+				struct hpt_iop_request_get_config *config)
+{
+	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
+
+	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
+	req->header.size =
+		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
+	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+	req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_GET_CONFIG<<5);
+
+	if (iop_send_sync_request_mv(hba, 0, 20000)) {
+		dprintk("Get config send cmd failed\n");
+		return -1;
+	}
+
+	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
 	return 0;
 }
 
-static int iop_set_config(struct hptiop_hba *hba,
+static int iop_set_config_itl(struct hptiop_hba *hba,
 				struct hpt_iop_request_set_config *config)
 {
 	u32 req32;
 	struct hpt_iop_request_set_config __iomem *req;
 
-	req32 = readl(&hba->iop->inbound_queue);
+	req32 = readl(&hba->u.itl.iop->inbound_queue);
 	if (req32 == IOPMU_QUEUE_EMPTY)
 		return -1;
 
 	req = (struct hpt_iop_request_set_config __iomem *)
-			((unsigned long)hba->iop + req32);
+			((unsigned long)hba->u.itl.iop + req32);
 
 	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
 		(u8 *)config + sizeof(struct hpt_iop_request_header),
@@ -232,22 +372,52 @@ static int iop_set_config(struct hptiop_hba *hba,
 	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
 	writel(IOP_RESULT_PENDING, &req->header.result);
 
-	if (iop_send_sync_request(hba, req, 20000)) {
+	if (iop_send_sync_request_itl(hba, req, 20000)) {
 		dprintk("Set config send cmd failed\n");
 		return -1;
 	}
 
-	writel(req32, &hba->iop->outbound_queue);
+	writel(req32, &hba->u.itl.iop->outbound_queue);
 	return 0;
 }
 
-static int hptiop_initialize_iop(struct hptiop_hba *hba)
+static int iop_set_config_mv(struct hptiop_hba *hba,
+				struct hpt_iop_request_set_config *config)
 {
-	struct hpt_iopmu __iomem *iop = hba->iop;
+	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
 
-	/* enable interrupts */
+	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+	req->header.size =
+		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+	req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+
+	if (iop_send_sync_request_mv(hba, 0, 20000)) {
+		dprintk("Set config send cmd failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
+{
 	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
-		&iop->outbound_intmask);
+		&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
+{
+	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
+		&hba->u.mv.regs->outbound_intmask);
+}
+
+static int hptiop_initialize_iop(struct hptiop_hba *hba)
+{
+	/* enable interrupts */
+	hba->ops->enable_intr(hba);
 
 	hba->initialized = 1;
 
@@ -261,37 +431,74 @@ static int hptiop_initialize_iop(struct hptiop_hba *hba)
 	return 0;
 }
 
-static int hptiop_map_pci_bar(struct hptiop_hba *hba)
+static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
 {
 	u32 mem_base_phy, length;
 	void __iomem *mem_base_virt;
+
 	struct pci_dev *pcidev = hba->pcidev;
 
-	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
+
+	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
 		printk(KERN_ERR "scsi%d: pci resource invalid\n",
 				hba->host->host_no);
-		return -1;
+		return 0;
 	}
 
-	mem_base_phy = pci_resource_start(pcidev, 0);
-	length = pci_resource_len(pcidev, 0);
+	mem_base_phy = pci_resource_start(pcidev, index);
+	length = pci_resource_len(pcidev, index);
 	mem_base_virt = ioremap(mem_base_phy, length);
 
 	if (!mem_base_virt) {
 		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
 				hba->host->host_no);
+		return 0;
+	}
+	return mem_base_virt;
+}
+
+static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
+{
+	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
+	if (hba->u.itl.iop)
+		return 0;
+	else
+		return -1;
+}
+
+static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
+{
+	iounmap(hba->u.itl.iop);
+}
+
+static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
+{
+	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
+	if (hba->u.mv.regs == 0)
+		return -1;
+
+	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
+	if (hba->u.mv.mu == 0) {
+		iounmap(hba->u.mv.regs);
 		return -1;
 	}
 
-	hba->iop = mem_base_virt;
-	dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
 	return 0;
 }
 
+static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
+{
+	iounmap(hba->u.mv.regs);
+	iounmap(hba->u.mv.mu);
+}
+
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
 {
 	dprintk("iop message 0x%x\n", msg);
 
+	if (msg == IOPMU_INBOUND_MSG0_NOP)
+		hba->msg_done = 1;
+
 	if (!hba->initialized)
 		return;
 
@@ -303,7 +510,7 @@ static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
 	hba->msg_done = 1;
 }
 
-static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
+static struct hptiop_request *get_req(struct hptiop_hba *hba)
 {
 	struct hptiop_request *ret;
 
@@ -316,30 +523,19 @@ static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
 	return ret;
 }
 
-static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
+static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
 {
 	dprintk("free_req(%d, %p)\n", req->index, req);
 	req->next = hba->req_list;
 	hba->req_list = req;
 }
 
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+				struct hpt_iop_request_scsi_command *req)
 {
-	struct hpt_iop_request_scsi_command *req;
 	struct scsi_cmnd *scp;
-	u32 tag;
-
-	if (hba->iopintf_v2) {
-		tag = _tag & ~ IOPMU_QUEUE_REQUEST_RESULT_BIT;
-		req = hba->reqs[tag].req_virt;
-		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
-			req->header.result = IOP_RESULT_SUCCESS;
-	} else {
-		tag = _tag;
-		req = hba->reqs[tag].req_virt;
-	}
 
-	dprintk("hptiop_host_request_callback: req=%p, type=%d, "
+	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
 			"result=%d, context=0x%x tag=%d\n",
 			req, req->header.type, req->header.result,
 			req->header.context, tag);
@@ -354,6 +550,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
 
 	switch (le32_to_cpu(req->header.result)) {
 	case IOP_RESULT_SUCCESS:
+		scsi_set_resid(scp,
+			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
 		scp->result = (DID_OK<<16);
 		break;
 	case IOP_RESULT_BAD_TARGET:
@@ -371,12 +569,12 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
 	case IOP_RESULT_INVALID_REQUEST:
 		scp->result = (DID_ABORT<<16);
 		break;
-	case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
+	case IOP_RESULT_CHECK_CONDITION:
+		scsi_set_resid(scp,
+			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
 		scp->result = SAM_STAT_CHECK_CONDITION;
-		memset(&scp->sense_buffer,
-				0, sizeof(scp->sense_buffer));
 		memcpy(&scp->sense_buffer, &req->sg_list,
-				min(sizeof(scp->sense_buffer),
+				min_t(size_t, sizeof(scp->sense_buffer),
 				le32_to_cpu(req->dataxfer_length)));
 		break;
 
@@ -391,15 +589,33 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
 	free_req(hba, &hba->reqs[tag]);
 }
 
-void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
+{
+	struct hpt_iop_request_scsi_command *req;
+	u32 tag;
+
+	if (hba->iopintf_v2) {
+		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
+		req = hba->reqs[tag].req_virt;
+		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+	} else {
+		tag = _tag;
+		req = hba->reqs[tag].req_virt;
+	}
+
+	hptiop_finish_scsi_req(hba, tag, req);
+}
+
+void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
 	struct hpt_iop_request_header __iomem *req;
 	struct hpt_iop_request_ioctl_command __iomem *p;
 	struct hpt_ioctl_k *arg;
 
 	req = (struct hpt_iop_request_header __iomem *)
-			((unsigned long)hba->iop + tag);
-	dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
+			((unsigned long)hba->u.itl.iop + tag);
+	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
 			"result=%d, context=0x%x tag=%d\n",
 			req, readl(&req->type), readl(&req->result),
 			readl(&req->context), tag);
@@ -427,7 +643,7 @@ void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
 		arg->result = HPT_IOCTL_RESULT_FAILED;
 
 	arg->done(arg);
-	writel(tag, &hba->iop->outbound_queue);
+	writel(tag, &hba->u.itl.iop->outbound_queue);
 }
 
 static irqreturn_t hptiop_intr(int irq, void *dev_id)
@@ -437,7 +653,7 @@ static irqreturn_t hptiop_intr(int irq, void *dev_id)
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	handled = __iop_intr(hba);
+	handled = hba->ops->iop_intr(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	return handled;
@@ -469,6 +685,57 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
 	return HPT_SCP(scp)->sgcnt;
 }
 
+static void hptiop_post_req_itl(struct hptiop_hba *hba,
+					struct hptiop_request *_req)
+{
+	struct hpt_iop_request_header *reqhdr = _req->req_virt;
+
+	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+							(u32)_req->index);
+	reqhdr->context_hi32 = 0;
+
+	if (hba->iopintf_v2) {
+		u32 size, size_bits;
+
+		size = le32_to_cpu(reqhdr->size);
+		if (size < 256)
+			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
+		else if (size < 512)
+			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
+		else
+			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
+						IOPMU_QUEUE_ADDR_HOST_BIT;
+		writel(_req->req_shifted_phy | size_bits,
+			&hba->u.itl.iop->inbound_queue);
+	} else
+		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
+					&hba->u.itl.iop->inbound_queue);
+}
+
+static void hptiop_post_req_mv(struct hptiop_hba *hba,
+					struct hptiop_request *_req)
+{
+	struct hpt_iop_request_header *reqhdr = _req->req_virt;
+	u32 size, size_bit;
+
+	reqhdr->context = cpu_to_le32(_req->index<<8 |
+					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
+	reqhdr->context_hi32 = 0;
+	size = le32_to_cpu(reqhdr->size);
+
+	if (size <= 256)
+		size_bit = 0;
+	else if (size <= 256*2)
+		size_bit = 1;
+	else if (size <= 256*3)
+		size_bit = 2;
+	else
+		size_bit = 3;
+
+	mv_inbound_write((_req->req_shifted_phy << 5) |
+		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
+}
+
 static int hptiop_queuecommand(struct scsi_cmnd *scp,
 				void (*done)(struct scsi_cmnd *))
 {
@@ -518,9 +785,6 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
 	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
-	req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
-							(u32)_req->index);
-	req->header.context_hi32 = 0;
 	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
 	req->channel = scp->device->channel;
 	req->target = scp->device->id;
@@ -531,21 +795,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 			+ sg_count * sizeof(struct hpt_iopsg));
 
 	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
-
-	if (hba->iopintf_v2) {
-		u32 size_bits;
-		if (req->header.size < 256)
-			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
-		else if (req->header.size < 512)
-			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
-		else
-			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
-						IOPMU_QUEUE_ADDR_HOST_BIT;
-		writel(_req->req_shifted_phy | size_bits, &hba->iop->inbound_queue);
-	} else
-		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
-					&hba->iop->inbound_queue);
-
+	hba->ops->post_req(hba, _req);
 	return 0;
 
 cmd_done:
@@ -563,9 +813,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
 {
 	if (atomic_xchg(&hba->resetting, 1) == 0) {
 		atomic_inc(&hba->reset_count);
-		writel(IOPMU_INBOUND_MSG0_RESET,
-			&hba->iop->inbound_msgaddr0);
-		hptiop_pci_posting_flush(hba->iop);
+		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
 	}
 
 	wait_event_timeout(hba->reset_wq,
@@ -601,8 +849,10 @@ static int hptiop_reset(struct scsi_cmnd *scp)
 static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
 						int queue_depth)
 {
-	if(queue_depth > 256)
-		queue_depth = 256;
+	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
+
+	if (queue_depth > hba->max_requests)
+		queue_depth = hba->max_requests;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
 	return queue_depth;
 }
@@ -663,6 +913,26 @@ static struct scsi_host_template driver_template = {
 	.change_queue_depth = hptiop_adjust_disk_queue_depth,
 };
 
+static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
+{
+	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
+			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
+	if (hba->u.mv.internal_req)
+		return 0;
+	else
+		return -1;
+}
+
+static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
+{
+	if (hba->u.mv.internal_req) {
+		dma_free_coherent(&hba->pcidev->dev, 0x800,
+			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
+		return 0;
+	} else
+		return -1;
+}
+
 static int __devinit hptiop_probe(struct pci_dev *pcidev,
 					const struct pci_device_id *id)
 {
@@ -708,6 +978,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
 	hba = (struct hptiop_hba *)host->hostdata;
 
+	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
 	hba->pcidev = pcidev;
 	hba->host = host;
 	hba->initialized = 0;
@@ -725,16 +996,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	host->n_io_port = 0;
 	host->irq = pcidev->irq;
 
-	if (hptiop_map_pci_bar(hba))
+	if (hba->ops->map_pci_bar(hba))
 		goto free_scsi_host;
 
-	if (iop_wait_ready(hba->iop, 20000)) {
+	if (hba->ops->iop_wait_ready(hba, 20000)) {
 		printk(KERN_ERR "scsi%d: firmware not ready\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
 	}
 
-	if (iop_get_config(hba, &iop_config)) {
+	if (hba->ops->internal_memalloc) {
+		if (hba->ops->internal_memalloc(hba)) {
+			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+				hba->host->host_no);
+			goto unmap_pci_bar;
+		}
+	}
+
+	if (hba->ops->get_config(hba, &iop_config)) {
 		printk(KERN_ERR "scsi%d: get config failed\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
@@ -770,7 +1049,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	set_config.vbus_id = cpu_to_le16(host->host_no);
 	set_config.max_host_request_size = cpu_to_le16(req_size);
 
-	if (iop_set_config(hba, &set_config)) {
+	if (hba->ops->set_config(hba, &set_config)) {
 		printk(KERN_ERR "scsi%d: set config failed\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
@@ -839,21 +1118,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
 free_request_mem:
 	dma_free_coherent(&hba->pcidev->dev,
-			hba->req_size*hba->max_requests + 0x20,
+			hba->req_size * hba->max_requests + 0x20,
 			hba->dma_coherent, hba->dma_coherent_handle);
 
 free_request_irq:
 	free_irq(hba->pcidev->irq, hba);
 
 unmap_pci_bar:
-	iounmap(hba->iop);
+	if (hba->ops->internal_memfree)
+		hba->ops->internal_memfree(hba);
 
-free_pci_regions:
-	pci_release_regions(pcidev) ;
+	hba->ops->unmap_pci_bar(hba);
 
 free_scsi_host:
 	scsi_host_put(host);
 
+free_pci_regions:
+	pci_release_regions(pcidev);
+
 disable_pci_device:
 	pci_disable_device(pcidev);
 
@@ -865,8 +1147,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
 {
 	struct Scsi_Host *host = pci_get_drvdata(pcidev);
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
-	struct hpt_iopmu __iomem *iop = hba->iop;
-	u32 int_mask;
 
 	dprintk("hptiop_shutdown(%p)\n", hba);
 
@@ -876,11 +1156,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
 			hba->host->host_no);
 
 	/* disable all outbound interrupts */
-	int_mask = readl(&iop->outbound_intmask);
+	hba->ops->disable_intr(hba);
+}
+
+static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
+{
+	u32 int_mask;
+
+	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
 	writel(int_mask |
 		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
-		&iop->outbound_intmask);
-	hptiop_pci_posting_flush(iop);
+		&hba->u.itl.iop->outbound_intmask);
+	readl(&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
+{
+	writel(0, &hba->u.mv.regs->outbound_intmask);
+	readl(&hba->u.mv.regs->outbound_intmask);
 }
 
 static void hptiop_remove(struct pci_dev *pcidev)
@@ -901,7 +1194,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
 			hba->dma_coherent,
 			hba->dma_coherent_handle);
 
-	iounmap(hba->iop);
+	if (hba->ops->internal_memfree)
+		hba->ops->internal_memfree(hba);
+
+	hba->ops->unmap_pci_bar(hba);
 
 	pci_release_regions(hba->pcidev);
 	pci_set_drvdata(hba->pcidev, NULL);
@@ -910,11 +1206,50 @@ static void hptiop_remove(struct pci_dev *pcidev)
 	scsi_host_put(host);
 }
 
+static struct hptiop_adapter_ops hptiop_itl_ops = {
+	.iop_wait_ready = iop_wait_ready_itl,
+	.internal_memalloc = 0,
+	.internal_memfree = 0,
+	.map_pci_bar = hptiop_map_pci_bar_itl,
+	.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
+	.enable_intr = hptiop_enable_intr_itl,
+	.disable_intr = hptiop_disable_intr_itl,
+	.get_config = iop_get_config_itl,
+	.set_config = iop_set_config_itl,
+	.iop_intr = iop_intr_itl,
+	.post_msg = hptiop_post_msg_itl,
+	.post_req = hptiop_post_req_itl,
+};
+
+static struct hptiop_adapter_ops hptiop_mv_ops = {
+	.iop_wait_ready = iop_wait_ready_mv,
+	.internal_memalloc = hptiop_internal_memalloc_mv,
+	.internal_memfree = hptiop_internal_memfree_mv,
+	.map_pci_bar = hptiop_map_pci_bar_mv,
+	.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
+	.enable_intr = hptiop_enable_intr_mv,
+	.disable_intr = hptiop_disable_intr_mv,
+	.get_config = iop_get_config_mv,
+	.set_config = iop_set_config_mv,
+	.iop_intr = iop_intr_mv,
+	.post_msg = hptiop_post_msg_mv,
+	.post_req = hptiop_post_req_mv,
+};
+
 static struct pci_device_id hptiop_id_table[] = {
-	{ PCI_VDEVICE(TTI, 0x3220) },
-	{ PCI_VDEVICE(TTI, 0x3320) },
-	{ PCI_VDEVICE(TTI, 0x3520) },
-	{ PCI_VDEVICE(TTI, 0x4320) },
+	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
+	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
+	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
 	{},
 };
 
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 2a5e46e001cb..a0289f219752 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,5 +1,5 @@
 /*
- * HighPoint RR3xxx controller driver for Linux
+ * HighPoint RR3xxx/4xxx controller driver for Linux
  * Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -18,8 +18,7 @@
 #ifndef _HPTIOP_H_
 #define _HPTIOP_H_
 
-struct hpt_iopmu
-{
+struct hpt_iopmu_itl {
 	__le32 resrved0[4];
 	__le32 inbound_msgaddr0;
 	__le32 inbound_msgaddr1;
@@ -54,6 +53,40 @@ struct hpt_iopmu
 #define IOPMU_INBOUND_INT_ERROR 8
 #define IOPMU_INBOUND_INT_POSTQUEUE 0x10
 
+#define MVIOP_QUEUE_LEN 512
+
+struct hpt_iopmu_mv {
+	__le32 inbound_head;
+	__le32 inbound_tail;
+	__le32 outbound_head;
+	__le32 outbound_tail;
+	__le32 inbound_msg;
+	__le32 outbound_msg;
+	__le32 reserve[10];
+	__le64 inbound_q[MVIOP_QUEUE_LEN];
+	__le64 outbound_q[MVIOP_QUEUE_LEN];
+};
+
+struct hpt_iopmv_regs {
+	__le32 reserved[0x20400 / 4];
+	__le32 inbound_doorbell;
+	__le32 inbound_intmask;
+	__le32 outbound_doorbell;
+	__le32 outbound_intmask;
+};
+
+#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
+#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
+
+#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32 0xffffffff
+#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT 1
+#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2
+
+#define MVIOP_MU_INBOUND_INT_MSG 1
+#define MVIOP_MU_INBOUND_INT_POSTQUEUE 2
+#define MVIOP_MU_OUTBOUND_INT_MSG 1
+#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+
 enum hpt_iopmu_message {
 	/* host-to-iop messages */
 	IOPMU_INBOUND_MSG0_NOP = 0,
@@ -72,8 +105,7 @@ enum hpt_iopmu_message {
 	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
 };
 
-struct hpt_iop_request_header
-{
+struct hpt_iop_request_header {
 	__le32 size;
 	__le32 type;
 	__le32 flags;
@@ -104,11 +136,10 @@ enum hpt_iop_result_type {
 	IOP_RESULT_RESET,
 	IOP_RESULT_INVALID_REQUEST,
 	IOP_RESULT_BAD_TARGET,
-	IOP_RESULT_MODE_SENSE_CHECK_CONDITION,
+	IOP_RESULT_CHECK_CONDITION,
 };
 
-struct hpt_iop_request_get_config
-{
+struct hpt_iop_request_get_config {
 	struct hpt_iop_request_header header;
 	__le32 interface_version;
 	__le32 firmware_version;
@@ -121,8 +152,7 @@ struct hpt_iop_request_get_config
 	__le32 sdram_size;
 };
 
-struct hpt_iop_request_set_config
-{
+struct hpt_iop_request_set_config {
 	struct hpt_iop_request_header header;
 	__le32 iop_id;
 	__le16 vbus_id;
@@ -130,15 +160,13 @@ struct hpt_iop_request_set_config
 	__le32 reserve[6];
 };
 
-struct hpt_iopsg
-{
+struct hpt_iopsg {
 	__le32 size;
 	__le32 eot; /* non-zero: end of table */
 	__le64 pci_address;
 };
 
-struct hpt_iop_request_block_command
-{
+struct hpt_iop_request_block_command {
 	struct hpt_iop_request_header header;
 	u8 channel;
 	u8 target;
@@ -156,8 +184,7 @@ struct hpt_iop_request_block_command
 #define IOP_BLOCK_COMMAND_FLUSH 4
 #define IOP_BLOCK_COMMAND_SHUTDOWN 5
 
-struct hpt_iop_request_scsi_command
-{
+struct hpt_iop_request_scsi_command {
 	struct hpt_iop_request_header header;
 	u8 channel;
 	u8 target;
@@ -168,8 +195,7 @@ struct hpt_iop_request_scsi_command
 	struct hpt_iopsg sg_list[1];
 };
 
-struct hpt_iop_request_ioctl_command
-{
+struct hpt_iop_request_ioctl_command {
 	struct hpt_iop_request_header header;
 	__le32 ioctl_code;
 	__le32 inbuf_size;
@@ -182,11 +208,11 @@ struct hpt_iop_request_ioctl_command
 #define HPTIOP_MAX_REQUESTS 256u
 
 struct hptiop_request {
-	struct hptiop_request * next;
-	void * req_virt;
+	struct hptiop_request *next;
+	void *req_virt;
 	u32 req_shifted_phy;
-	struct scsi_cmnd * scp;
+	struct scsi_cmnd *scp;
 	int index;
 };
 
 struct hpt_scsi_pointer {
@@ -198,9 +224,21 @@ struct hpt_scsi_pointer {
 #define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
 
 struct hptiop_hba {
-	struct hpt_iopmu __iomem * iop;
-	struct Scsi_Host * host;
-	struct pci_dev * pcidev;
+	struct hptiop_adapter_ops *ops;
+	union {
+		struct {
+			struct hpt_iopmu_itl __iomem *iop;
+		} itl;
+		struct {
+			struct hpt_iopmv_regs *regs;
+			struct hpt_iopmu_mv __iomem *mu;
+			void *internal_req;
+			dma_addr_t internal_req_phy;
+		} mv;
+	} u;
+
+	struct Scsi_Host *host;
+	struct pci_dev *pcidev;
 
 	/* IOP config info */
 	u32 interface_version;
@@ -213,15 +251,15 @@ struct hptiop_hba {
 
 	u32 req_size; /* host-allocated request buffer size */
 
-	int iopintf_v2: 1;
-	int initialized: 1;
-	int msg_done: 1;
+	u32 iopintf_v2: 1;
+	u32 initialized: 1;
+	u32 msg_done: 1;
 
 	struct hptiop_request * req_list;
 	struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
 
 	/* used to free allocated dma area */
-	void * dma_coherent;
+	void *dma_coherent;
 	dma_addr_t dma_coherent_handle;
 
 	atomic_t reset_count;
@@ -231,19 +269,35 @@ struct hptiop_hba {
 	wait_queue_head_t ioctl_wq;
 };
 
-struct hpt_ioctl_k
-{
+struct hpt_ioctl_k {
 	struct hptiop_hba * hba;
 	u32 ioctl_code;
 	u32 inbuf_size;
 	u32 outbuf_size;
-	void * inbuf;
-	void * outbuf;
-	u32 * bytes_returned;
+	void *inbuf;
+	void *outbuf;
+	u32 *bytes_returned;
 	void (*done)(struct hpt_ioctl_k *);
 	int result; /* HPT_IOCTL_RESULT_ */
 };
 
+struct hptiop_adapter_ops {
+	int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
+	int (*internal_memalloc)(struct hptiop_hba *hba);
+	int (*internal_memfree)(struct hptiop_hba *hba);
+	int (*map_pci_bar)(struct hptiop_hba *hba);
+	void (*unmap_pci_bar)(struct hptiop_hba *hba);
+	void (*enable_intr)(struct hptiop_hba *hba);
+	void (*disable_intr)(struct hptiop_hba *hba);
+	int (*get_config)(struct hptiop_hba *hba,
+				struct hpt_iop_request_get_config *config);
+	int (*set_config)(struct hptiop_hba *hba,
+				struct hpt_iop_request_set_config *config);
+	int (*iop_intr)(struct hptiop_hba *hba);
+	void (*post_msg)(struct hptiop_hba *hba, u32 msg);
+	void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
+};
+
 #define HPT_IOCTL_RESULT_OK 0
 #define HPT_IOCTL_RESULT_FAILED (-1)
 