Diffstat (limited to 'drivers/scsi/aic94xx/aic94xx_hwi.c')
-rw-r--r-- | drivers/scsi/aic94xx/aic94xx_hwi.c | 1376 |
1 file changed, 1376 insertions, 0 deletions
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
new file mode 100644
index 000000000000..a24201351108
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -0,0 +1,1376 @@
1 | /* | ||
2 | * Aic94xx SAS/SATA driver hardware interface. | ||
3 | * | ||
4 | * Copyright (C) 2005 Adaptec, Inc. All rights reserved. | ||
5 | * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> | ||
6 | * | ||
7 | * This file is licensed under GPLv2. | ||
8 | * | ||
9 | * This file is part of the aic94xx driver. | ||
10 | * | ||
11 | * The aic94xx driver is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License as | ||
13 | * published by the Free Software Foundation; version 2 of the | ||
14 | * License. | ||
15 | * | ||
16 | * The aic94xx driver is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with the aic94xx driver; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/pci.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/module.h> | ||
30 | |||
31 | #include "aic94xx.h" | ||
32 | #include "aic94xx_reg.h" | ||
33 | #include "aic94xx_hwi.h" | ||
34 | #include "aic94xx_seq.h" | ||
35 | #include "aic94xx_dump.h" | ||
36 | |||
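/* Usable size of MBAR0 sliding window B; set in asd_init_sw() below
 * from the length of the mapped BAR (len - 0x80).
 */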
37 | u32 MBAR0_SWB_SIZE; | ||
38 | |||
39 | /* ---------- Initialization ---------- */ | ||
40 | |||
41 | static void asd_get_user_sas_addr(struct asd_ha_struct *asd_ha) | ||
42 | { | ||
43 | extern char sas_addr_str[]; | ||
44 | /* If the user has specified a WWN, it overrides other settings. | ||
45 | */ | ||
46 | if (sas_addr_str[0] != '\0') | ||
47 | asd_destringify_sas_addr(asd_ha->hw_prof.sas_addr, | ||
48 | sas_addr_str); | ||
49 | else if (asd_ha->hw_prof.sas_addr[0] != 0) | ||
50 | asd_stringify_sas_addr(sas_addr_str, asd_ha->hw_prof.sas_addr); | ||
51 | } | ||
52 | |||
53 | static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha) | ||
54 | { | ||
55 | int i; | ||
56 | |||
57 | for (i = 0; i < ASD_MAX_PHYS; i++) { | ||
58 | if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0) | ||
59 | continue; | ||
60 | /* This phy is configured (its descriptor has an address); | ||
61 | * propagate the adapter SAS address to it. */ | ||
62 | ASD_DPRINTK("setting phy%d addr to %llx\n", i, | ||
63 | SAS_ADDR(asd_ha->hw_prof.sas_addr)); | ||
64 | memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, | ||
65 | asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); | ||
66 | } | ||
67 | } | ||
68 | |||
69 | /* ---------- PHY initialization ---------- */ | ||
70 | |||
71 | static void asd_init_phy_identify(struct asd_phy *phy) | ||
72 | { | ||
73 | phy->identify_frame = phy->id_frm_tok->vaddr; | ||
74 | |||
75 | memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); | ||
76 | |||
77 | phy->identify_frame->dev_type = SAS_END_DEV; | ||
78 | if (phy->sas_phy.role & PHY_ROLE_INITIATOR) | ||
79 | phy->identify_frame->initiator_bits = phy->sas_phy.iproto; | ||
80 | if (phy->sas_phy.role & PHY_ROLE_TARGET) | ||
81 | phy->identify_frame->target_bits = phy->sas_phy.tproto; | ||
82 | memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr, | ||
83 | SAS_ADDR_SIZE); | ||
84 | phy->identify_frame->phy_id = phy->sas_phy.id; | ||
85 | } | ||
86 | |||
87 | static int asd_init_phy(struct asd_phy *phy) | ||
88 | { | ||
89 | struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha; | ||
90 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | ||
91 | |||
92 | sas_phy->enabled = 1; | ||
93 | sas_phy->class = SAS; | ||
94 | sas_phy->iproto = SAS_PROTO_ALL; | ||
95 | sas_phy->tproto = 0; | ||
96 | sas_phy->type = PHY_TYPE_PHYSICAL; | ||
97 | sas_phy->role = PHY_ROLE_INITIATOR; | ||
98 | sas_phy->oob_mode = OOB_NOT_CONNECTED; | ||
99 | sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; | ||
100 | |||
101 | phy->id_frm_tok = asd_alloc_coherent(asd_ha, | ||
102 | sizeof(*phy->identify_frame), | ||
103 | GFP_KERNEL); | ||
104 | if (!phy->id_frm_tok) { | ||
105 | asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id); | ||
106 | return -ENOMEM; | ||
107 | } else | ||
108 | asd_init_phy_identify(phy); | ||
109 | |||
110 | memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd)); | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
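/* Hook every phy up to its descriptor and to libsas, then fully
 * initialize (allocate an IDENTIFY frame for) only the phys enabled
 * in the hardware profile.
 */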
115 | static int asd_init_phys(struct asd_ha_struct *asd_ha) | ||
116 | { | ||
117 | u8 i; | ||
118 | u8 phy_mask = asd_ha->hw_prof.enabled_phys; | ||
119 | |||
120 | for (i = 0; i < ASD_MAX_PHYS; i++) { | ||
121 | struct asd_phy *phy = &asd_ha->phys[i]; | ||
122 | |||
123 | phy->phy_desc = &asd_ha->hw_prof.phy_desc[i]; | ||
124 | |||
125 | phy->sas_phy.enabled = 0; | ||
126 | phy->sas_phy.id = i; | ||
127 | phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0]; | ||
128 | phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0]; | ||
129 | phy->sas_phy.ha = &asd_ha->sas_ha; | ||
130 | phy->sas_phy.lldd_phy = phy; | ||
131 | } | ||
132 | |||
133 | /* Now enable and initialize only the enabled phys. */ | ||
134 | for_each_phy(phy_mask, phy_mask, i) { | ||
135 | int err = asd_init_phy(&asd_ha->phys[i]); | ||
136 | if (err) | ||
137 | return err; | ||
138 | } | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | /* ---------- Sliding windows ---------- */ | ||
144 | |||
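/* Unlock the MBAR key in PCI config space, then program sliding
 * windows A, B and C of MBAR0 to expose the internal register,
 * CSEQ CIO and EXSI regions respectively.
 */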
145 | static int asd_init_sw(struct asd_ha_struct *asd_ha) | ||
146 | { | ||
147 | struct pci_dev *pcidev = asd_ha->pcidev; | ||
148 | int err; | ||
149 | u32 v; | ||
150 | |||
151 | /* Unlock MBARs */ | ||
152 | err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v); | ||
153 | if (err) { | ||
154 | asd_printk("couldn't access conf. space of %s\n", | ||
155 | pci_name(pcidev)); | ||
156 | goto Err; | ||
157 | } | ||
158 | if (v) | ||
159 | err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v); | ||
160 | if (err) { | ||
161 | asd_printk("couldn't write to MBAR_KEY of %s\n", | ||
162 | pci_name(pcidev)); | ||
163 | goto Err; | ||
164 | } | ||
165 | |||
166 | /* Set sliding windows A, B and C to point to proper internal | ||
167 | * memory regions. | ||
168 | */ | ||
169 | pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR); | ||
170 | pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB, | ||
171 | REG_BASE_ADDR_CSEQCIO); | ||
172 | pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI); | ||
173 | asd_ha->io_handle[0].swa_base = REG_BASE_ADDR; | ||
174 | asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO; | ||
175 | asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI; | ||
176 | MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80; | ||
177 | if (!asd_ha->iospace) { | ||
178 | /* MBAR1 will point to OCM (On Chip Memory) */ | ||
179 | pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR); | ||
180 | asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR; | ||
181 | } | ||
182 | spin_lock_init(&asd_ha->iolock); | ||
183 | Err: | ||
184 | return err; | ||
185 | } | ||
186 | |||
187 | /* ---------- SCB initialization ---------- */ | ||
188 | |||
189 | /** | ||
190 | * asd_init_scbs - manually allocate the first SCB. | ||
191 | * @asd_ha: pointer to host adapter structure | ||
192 | * | ||
193 | * This allocates the very first SCB which would be sent to the | ||
194 | * sequencer for execution. Its bus address is written to | ||
195 | * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of | ||
196 | * the _next_ scb to be DMA-ed to the host adapter is read from the last | ||
197 | * SCB DMA-ed to the host adapter, we have to always stay one step | ||
198 | * ahead of the sequencer and keep one SCB already allocated. | ||
199 | */ | ||
200 | static int asd_init_scbs(struct asd_ha_struct *asd_ha) | ||
201 | { | ||
202 | struct asd_seq_data *seq = &asd_ha->seq; | ||
203 | int bitmap_bytes; | ||
204 | |||
205 | /* allocate the index array and bitmap */ | ||
206 | asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs; | ||
207 | asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits* | ||
208 | sizeof(void *), GFP_KERNEL); | ||
209 | if (!asd_ha->seq.tc_index_array) | ||
210 | return -ENOMEM; | ||
211 | |||
212 | bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8; | ||
213 | bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); | ||
214 | asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); | ||
215 | if (!asd_ha->seq.tc_index_bitmap) | ||
216 | return -ENOMEM; | ||
217 | |||
218 | spin_lock_init(&seq->tc_index_lock); | ||
219 | |||
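/* Pre-allocate the single SCB that stays one step ahead of the
 * sequencer; asd_swap_head_scb() rotates it with the head of each
 * list that gets posted.
 */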
220 | seq->next_scb.size = sizeof(struct scb); | ||
221 | seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL, | ||
222 | &seq->next_scb.dma_handle); | ||
223 | if (!seq->next_scb.vaddr) { | ||
224 | kfree(asd_ha->seq.tc_index_bitmap); | ||
225 | kfree(asd_ha->seq.tc_index_array); | ||
226 | asd_ha->seq.tc_index_bitmap = NULL; | ||
227 | asd_ha->seq.tc_index_array = NULL; | ||
228 | return -ENOMEM; | ||
229 | } | ||
230 | |||
231 | seq->pending = 0; | ||
232 | spin_lock_init(&seq->pend_q_lock); | ||
233 | INIT_LIST_HEAD(&seq->pend_q); | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | static inline void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha) | ||
239 | { | ||
240 | asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE; | ||
241 | asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE; | ||
242 | ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n", | ||
243 | asd_ha->hw_prof.max_scbs, | ||
244 | asd_ha->hw_prof.max_ddbs); | ||
245 | } | ||
246 | |||
247 | /* ---------- Done List initialization ---------- */ | ||
248 | |||
249 | static void asd_dl_tasklet_handler(unsigned long); | ||
250 | |||
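/* Allocate the done list ring of ASD_DL_SIZE entries used to receive
 * completions from the sequencer, and set up the tasklet that drains it.
 */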
251 | static int asd_init_dl(struct asd_ha_struct *asd_ha) | ||
252 | { | ||
253 | asd_ha->seq.actual_dl | ||
254 | = asd_alloc_coherent(asd_ha, | ||
255 | ASD_DL_SIZE * sizeof(struct done_list_struct), | ||
256 | GFP_KERNEL); | ||
257 | if (!asd_ha->seq.actual_dl) | ||
258 | return -ENOMEM; | ||
259 | asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr; | ||
260 | asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE; | ||
261 | asd_ha->seq.dl_next = 0; | ||
262 | tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler, | ||
263 | (unsigned long) asd_ha); | ||
264 | |||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | /* ---------- EDB and ESCB init ---------- */ | ||
269 | |||
270 | static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, unsigned int gfp_flags) | ||
271 | { | ||
272 | struct asd_seq_data *seq = &asd_ha->seq; | ||
273 | int i; | ||
274 | |||
275 | seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags); | ||
276 | if (!seq->edb_arr) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | for (i = 0; i < seq->num_edbs; i++) { | ||
280 | seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE, | ||
281 | gfp_flags); | ||
282 | if (!seq->edb_arr[i]) | ||
283 | goto Err_unroll; | ||
284 | memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE); | ||
285 | } | ||
286 | |||
287 | ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs); | ||
288 | |||
289 | return 0; | ||
290 | |||
291 | Err_unroll: | ||
292 | for (i-- ; i >= 0; i--) | ||
293 | asd_free_coherent(asd_ha, seq->edb_arr[i]); | ||
294 | kfree(seq->edb_arr); | ||
295 | seq->edb_arr = NULL; | ||
296 | |||
297 | return -ENOMEM; | ||
298 | } | ||
299 | |||
300 | static int asd_alloc_escbs(struct asd_ha_struct *asd_ha, | ||
301 | unsigned int gfp_flags) | ||
302 | { | ||
303 | struct asd_seq_data *seq = &asd_ha->seq; | ||
304 | struct asd_ascb *escb; | ||
305 | int i, escbs; | ||
306 | |||
307 | seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr), | ||
308 | gfp_flags); | ||
309 | if (!seq->escb_arr) | ||
310 | return -ENOMEM; | ||
311 | |||
312 | escbs = seq->num_escbs; | ||
313 | escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags); | ||
314 | if (!escb) { | ||
315 | asd_printk("couldn't allocate list of escbs\n"); | ||
316 | goto Err; | ||
317 | } | ||
318 | seq->num_escbs -= escbs; /* subtract what was not allocated */ | ||
319 | ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs); | ||
320 | |||
321 | for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next, | ||
322 | struct asd_ascb, | ||
323 | list)) { | ||
324 | seq->escb_arr[i] = escb; | ||
325 | escb->scb->header.opcode = EMPTY_SCB; | ||
326 | } | ||
327 | |||
328 | return 0; | ||
329 | Err: | ||
330 | kfree(seq->escb_arr); | ||
331 | seq->escb_arr = NULL; | ||
332 | return -ENOMEM; | ||
333 | |||
334 | } | ||
335 | |||
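/* Distribute the empty data buffers among the empty SCBs: each eSCB
 * gets ASD_EDBS_PER_SCB of them, recorded as sg elements carrying the
 * buffers' DMA addresses and sizes.
 */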
336 | static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha) | ||
337 | { | ||
338 | struct asd_seq_data *seq = &asd_ha->seq; | ||
339 | int i, k, z = 0; | ||
340 | |||
341 | for (i = 0; i < seq->num_escbs; i++) { | ||
342 | struct asd_ascb *ascb = seq->escb_arr[i]; | ||
343 | struct empty_scb *escb = &ascb->scb->escb; | ||
344 | |||
345 | ascb->edb_index = z; | ||
346 | |||
347 | escb->num_valid = ASD_EDBS_PER_SCB; | ||
348 | |||
349 | for (k = 0; k < ASD_EDBS_PER_SCB; k++) { | ||
350 | struct sg_el *eb = &escb->eb[k]; | ||
351 | struct asd_dma_tok *edb = seq->edb_arr[z++]; | ||
352 | |||
353 | memset(eb, 0, sizeof(*eb)); | ||
354 | eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle)); | ||
355 | eb->size = cpu_to_le32(((u32) edb->size)); | ||
356 | } | ||
357 | } | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * asd_init_escbs -- allocate and initialize empty scbs | ||
362 | * @asd_ha: pointer to host adapter structure | ||
363 | * | ||
364 | * An empty SCB carries a list of ASD_EDBS_PER_SCB (7) empty data | ||
365 | * buffers (sg elements), used to transport sense data, etc. | ||
366 | */ | ||
367 | static int asd_init_escbs(struct asd_ha_struct *asd_ha) | ||
368 | { | ||
369 | struct asd_seq_data *seq = &asd_ha->seq; | ||
370 | int err = 0; | ||
371 | |||
372 | /* Allocate two empty data buffers (edb) per sequencer. */ | ||
373 | int edbs = 2*(1+asd_ha->hw_prof.num_phys); | ||
374 | |||
375 | seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB; | ||
376 | seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB; | ||
377 | |||
378 | err = asd_alloc_edbs(asd_ha, GFP_KERNEL); | ||
379 | if (err) { | ||
380 | asd_printk("couldn't allocate edbs\n"); | ||
381 | return err; | ||
382 | } | ||
383 | |||
384 | err = asd_alloc_escbs(asd_ha, GFP_KERNEL); | ||
385 | if (err) { | ||
386 | asd_printk("couldn't allocate escbs\n"); | ||
387 | return err; | ||
388 | } | ||
389 | |||
390 | asd_assign_edbs2escbs(asd_ha); | ||
391 | /* In order to ensure that normal SCBs do not overfill sequencer | ||
392 | * memory and leave no space for escbs (halting condition), | ||
393 | * we increment pending here by the number of escbs. However, | ||
394 | * escbs are never pending. | ||
395 | */ | ||
396 | seq->pending = seq->num_escbs; | ||
397 | seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2; | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | /* ---------- HW initialization ---------- */ | ||
403 | |||
404 | /** | ||
405 | * asd_chip_hardrst -- hard reset the chip | ||
406 | * @asd_ha: pointer to host adapter structure | ||
407 | * | ||
408 | * This takes 16 cycles and is synchronous to CFCLK, which runs | ||
409 | * at 200 MHz, so this should take at most 80 nanoseconds. | ||
410 | */ | ||
411 | int asd_chip_hardrst(struct asd_ha_struct *asd_ha) | ||
412 | { | ||
413 | int i; | ||
414 | int count = 100; | ||
415 | u32 reg; | ||
416 | |||
417 | for (i = 0 ; i < 4 ; i++) { | ||
418 | asd_write_reg_dword(asd_ha, COMBIST, HARDRST); | ||
419 | } | ||
420 | |||
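/* Poll CHIMINT for the reset to be acknowledged, then clear the
 * detection bits.  Give up after roughly 100 microseconds.
 */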
421 | do { | ||
422 | udelay(1); | ||
423 | reg = asd_read_reg_dword(asd_ha, CHIMINT); | ||
424 | if (reg & HARDRSTDET) { | ||
425 | asd_write_reg_dword(asd_ha, CHIMINT, | ||
426 | HARDRSTDET|PORRSTDET); | ||
427 | return 0; | ||
428 | } | ||
429 | } while (--count > 0); | ||
430 | |||
431 | return -ENODEV; | ||
432 | } | ||
433 | |||
434 | /** | ||
435 | * asd_init_chip -- initialize the chip | ||
436 | * @asd_ha: pointer to host adapter structure | ||
437 | * | ||
438 | * Hard resets the chip, disables HA interrupts, downloads the sequencer | ||
439 | * microcode and starts the sequencers. The caller has to explicitly | ||
440 | * enable HA interrupts with asd_enable_ints(asd_ha). | ||
441 | */ | ||
442 | static int asd_init_chip(struct asd_ha_struct *asd_ha) | ||
443 | { | ||
444 | int err; | ||
445 | |||
446 | err = asd_chip_hardrst(asd_ha); | ||
447 | if (err) { | ||
448 | asd_printk("couldn't hard reset %s\n", | ||
449 | pci_name(asd_ha->pcidev)); | ||
450 | goto out; | ||
451 | } | ||
452 | |||
453 | asd_disable_ints(asd_ha); | ||
454 | |||
455 | err = asd_init_seqs(asd_ha); | ||
456 | if (err) { | ||
457 | asd_printk("couldn't init seqs for %s\n", | ||
458 | pci_name(asd_ha->pcidev)); | ||
459 | goto out; | ||
460 | } | ||
461 | |||
462 | err = asd_start_seqs(asd_ha); | ||
463 | if (err) { | ||
464 | asd_printk("coudln't start seqs for %s\n", | ||
465 | pci_name(asd_ha->pcidev)); | ||
466 | goto out; | ||
467 | } | ||
468 | out: | ||
469 | return err; | ||
470 | } | ||
471 | |||
472 | #define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE)) | ||
473 | |||
474 | static int max_devs = 0; | ||
475 | module_param_named(max_devs, max_devs, int, S_IRUGO); | ||
476 | MODULE_PARM_DESC(max_devs, "\n" | ||
477 | "\tMaximum number of SAS devices to support (not LUs).\n" | ||
478 | "\tDefault: 2176, Maximum: 65663.\n"); | ||
479 | |||
480 | static int max_cmnds = 0; | ||
481 | module_param_named(max_cmnds, max_cmnds, int, S_IRUGO); | ||
482 | MODULE_PARM_DESC(max_cmnds, "\n" | ||
483 | "\tMaximum number of commands queuable.\n" | ||
484 | "\tDefault: 512, Maximum: 66047.\n"); | ||
485 | |||
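/* Use on-chip memory (OCM) for extra device contexts: DEVCTXBASE is
 * pointed below OCM_BASE_ADDR by the size of the internal DDBs, so
 * DDB indexes past the internal ones land inside OCM.
 */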
486 | static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha) | ||
487 | { | ||
488 | unsigned long dma_addr = OCM_BASE_ADDR; | ||
489 | u32 d; | ||
490 | |||
491 | dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE; | ||
492 | asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr); | ||
493 | d = asd_read_reg_dword(asd_ha, CTXDOMAIN); | ||
494 | d |= 4; | ||
495 | asd_write_reg_dword(asd_ha, CTXDOMAIN, d); | ||
496 | asd_ha->hw_prof.max_ddbs += MAX_DEVS; | ||
497 | } | ||
498 | |||
499 | static int asd_extend_devctx(struct asd_ha_struct *asd_ha) | ||
500 | { | ||
501 | dma_addr_t dma_handle; | ||
502 | unsigned long dma_addr; | ||
503 | u32 d; | ||
504 | int size; | ||
505 | |||
506 | asd_extend_devctx_ocm(asd_ha); | ||
507 | |||
508 | asd_ha->hw_prof.ddb_ext = NULL; | ||
509 | if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) { | ||
510 | max_devs = asd_ha->hw_prof.max_ddbs; | ||
511 | return 0; | ||
512 | } | ||
513 | |||
514 | size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE; | ||
515 | |||
516 | asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL); | ||
517 | if (!asd_ha->hw_prof.ddb_ext) { | ||
518 | asd_printk("couldn't allocate memory for %d devices\n", | ||
519 | max_devs); | ||
520 | max_devs = asd_ha->hw_prof.max_ddbs; | ||
521 | return -ENOMEM; | ||
522 | } | ||
523 | dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle; | ||
524 | dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE); | ||
525 | dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE; | ||
526 | dma_handle = (dma_addr_t) dma_addr; | ||
527 | asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle); | ||
528 | d = asd_read_reg_dword(asd_ha, CTXDOMAIN); | ||
529 | d &= ~4; | ||
530 | asd_write_reg_dword(asd_ha, CTXDOMAIN, d); | ||
531 | |||
532 | asd_ha->hw_prof.max_ddbs = max_devs; | ||
533 | |||
534 | return 0; | ||
535 | } | ||
536 | |||
537 | static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha) | ||
538 | { | ||
539 | dma_addr_t dma_handle; | ||
540 | unsigned long dma_addr; | ||
541 | u32 d; | ||
542 | int size; | ||
543 | |||
544 | asd_ha->hw_prof.scb_ext = NULL; | ||
545 | if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) { | ||
546 | max_cmnds = asd_ha->hw_prof.max_scbs; | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE; | ||
551 | |||
552 | asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL); | ||
553 | if (!asd_ha->hw_prof.scb_ext) { | ||
554 | asd_printk("couldn't allocate memory for %d commands\n", | ||
555 | max_cmnds); | ||
556 | max_cmnds = asd_ha->hw_prof.max_scbs; | ||
557 | return -ENOMEM; | ||
558 | } | ||
559 | dma_handle = asd_ha->hw_prof.scb_ext->dma_handle; | ||
560 | dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE); | ||
561 | dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE; | ||
562 | dma_handle = (dma_addr_t) dma_addr; | ||
563 | asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle); | ||
564 | d = asd_read_reg_dword(asd_ha, CTXDOMAIN); | ||
565 | d &= ~1; | ||
566 | asd_write_reg_dword(asd_ha, CTXDOMAIN, d); | ||
567 | |||
568 | asd_ha->hw_prof.max_scbs = max_cmnds; | ||
569 | |||
570 | return 0; | ||
571 | } | ||
572 | |||
573 | /** | ||
574 | * asd_init_ctxmem -- initialize context memory | ||
575 | * @asd_ha: pointer to host adapter structure | ||
576 | * | ||
577 | * This function sets the maximum number of SCBs and | ||
578 | * DDBs which can be used by the sequencer. This is normally | ||
579 | * 512 and 128 respectively. If support for more SCBs or more DDBs | ||
580 | * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are | ||
581 | * initialized here to extend context memory to point to host memory, | ||
582 | * thus allowing unlimited support for SCBs and DDBs -- only limited | ||
583 | * by host memory. | ||
584 | */ | ||
585 | static int asd_init_ctxmem(struct asd_ha_struct *asd_ha) | ||
586 | { | ||
587 | int bitmap_bytes; | ||
588 | |||
589 | asd_get_max_scb_ddb(asd_ha); | ||
590 | asd_extend_devctx(asd_ha); | ||
591 | asd_extend_cmdctx(asd_ha); | ||
592 | |||
593 | /* The kernel wants bitmaps to be unsigned long sized. */ | ||
594 | bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8; | ||
595 | bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); | ||
596 | asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); | ||
597 | if (!asd_ha->hw_prof.ddb_bitmap) | ||
598 | return -ENOMEM; | ||
599 | spin_lock_init(&asd_ha->hw_prof.ddb_lock); | ||
600 | |||
601 | return 0; | ||
602 | } | ||
603 | |||
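/* Top-level hardware initialization: set up the sliding windows, read
 * OCM and flash settings, initialize context memory, phys, SCBs, the
 * done list and the empty SCBs, then reset the chip and start the
 * sequencers.
 */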
604 | int asd_init_hw(struct asd_ha_struct *asd_ha) | ||
605 | { | ||
606 | int err; | ||
607 | u32 v; | ||
608 | |||
609 | err = asd_init_sw(asd_ha); | ||
610 | if (err) | ||
611 | return err; | ||
612 | |||
613 | err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v); | ||
614 | if (err) { | ||
615 | asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n", | ||
616 | pci_name(asd_ha->pcidev)); | ||
617 | return err; | ||
618 | } | ||
619 | err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, | ||
620 | v | SC_TMR_DIS); | ||
621 | if (err) { | ||
622 | asd_printk("couldn't disable split completion timer of %s\n", | ||
623 | pci_name(asd_ha->pcidev)); | ||
624 | return err; | ||
625 | } | ||
626 | |||
627 | err = asd_read_ocm(asd_ha); | ||
628 | if (err) { | ||
629 | asd_printk("couldn't read ocm(%d)\n", err); | ||
630 | /* While suspicious, it is not an error that we | ||
631 | * couldn't read the OCM. */ | ||
632 | } | ||
633 | |||
634 | err = asd_read_flash(asd_ha); | ||
635 | if (err) { | ||
636 | asd_printk("couldn't read flash(%d)\n", err); | ||
637 | /* While suspicious, it is not an error that we | ||
638 | * couldn't read FLASH memory. | ||
639 | */ | ||
640 | } | ||
641 | |||
642 | asd_init_ctxmem(asd_ha); | ||
643 | |||
644 | asd_get_user_sas_addr(asd_ha); | ||
645 | if (!asd_ha->hw_prof.sas_addr[0]) { | ||
646 | asd_printk("No SAS Address provided for %s\n", | ||
647 | pci_name(asd_ha->pcidev)); | ||
648 | err = -ENODEV; | ||
649 | goto Out; | ||
650 | } | ||
651 | |||
652 | asd_propagate_sas_addr(asd_ha); | ||
653 | |||
654 | err = asd_init_phys(asd_ha); | ||
655 | if (err) { | ||
656 | asd_printk("couldn't initialize phys for %s\n", | ||
657 | pci_name(asd_ha->pcidev)); | ||
658 | goto Out; | ||
659 | } | ||
660 | |||
661 | err = asd_init_scbs(asd_ha); | ||
662 | if (err) { | ||
663 | asd_printk("couldn't initialize scbs for %s\n", | ||
664 | pci_name(asd_ha->pcidev)); | ||
665 | goto Out; | ||
666 | } | ||
667 | |||
668 | err = asd_init_dl(asd_ha); | ||
669 | if (err) { | ||
670 | asd_printk("couldn't initialize the done list:%d\n", | ||
671 | err); | ||
672 | goto Out; | ||
673 | } | ||
674 | |||
675 | err = asd_init_escbs(asd_ha); | ||
676 | if (err) { | ||
677 | asd_printk("couldn't initialize escbs\n"); | ||
678 | goto Out; | ||
679 | } | ||
680 | |||
681 | err = asd_init_chip(asd_ha); | ||
682 | if (err) { | ||
683 | asd_printk("couldn't init the chip\n"); | ||
684 | goto Out; | ||
685 | } | ||
686 | Out: | ||
687 | return err; | ||
688 | } | ||
689 | |||
690 | /* ---------- Chip reset ---------- */ | ||
691 | |||
692 | /** | ||
693 | * asd_chip_reset -- reset the host adapter, etc | ||
694 | * @asd_ha: pointer to host adapter structure of interest | ||
695 | * | ||
696 | * Called from the ISR. Hard resets the chip and lets everything | ||
697 | * time out. This should be no different than hot-unplugging the | ||
698 | * host adapter. Once everything times out we'll init the chip with | ||
699 | * a call to asd_init_chip() and enable interrupts with asd_enable_ints(). | ||
700 | * XXX finish. | ||
701 | */ | ||
702 | static void asd_chip_reset(struct asd_ha_struct *asd_ha) | ||
703 | { | ||
704 | struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; | ||
705 | |||
706 | ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev)); | ||
707 | asd_chip_hardrst(asd_ha); | ||
708 | sas_ha->notify_ha_event(sas_ha, HAE_RESET); | ||
709 | } | ||
710 | |||
711 | /* ---------- Done List Routines ---------- */ | ||
712 | |||
713 | static void asd_dl_tasklet_handler(unsigned long data) | ||
714 | { | ||
715 | struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data; | ||
716 | struct asd_seq_data *seq = &asd_ha->seq; | ||
717 | unsigned long flags; | ||
718 | |||
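/* Each done-list entry carries toggle bits; an entry is valid only
 * while its toggle matches dl_toggle, which is flipped every time
 * dl_next wraps around the ring (see the end of the loop).
 */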
719 | while (1) { | ||
720 | struct done_list_struct *dl = &seq->dl[seq->dl_next]; | ||
721 | struct asd_ascb *ascb; | ||
722 | |||
723 | if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle) | ||
724 | break; | ||
725 | |||
726 | /* find the aSCB */ | ||
727 | spin_lock_irqsave(&seq->tc_index_lock, flags); | ||
728 | ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index)); | ||
729 | spin_unlock_irqrestore(&seq->tc_index_lock, flags); | ||
730 | if (unlikely(!ascb)) { | ||
731 | ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n"); | ||
732 | goto next_1; | ||
733 | } else if (ascb->scb->header.opcode == EMPTY_SCB) { | ||
734 | goto out; | ||
735 | } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) { | ||
736 | goto next_1; | ||
737 | } | ||
738 | spin_lock_irqsave(&seq->pend_q_lock, flags); | ||
739 | list_del_init(&ascb->list); | ||
740 | seq->pending--; | ||
741 | spin_unlock_irqrestore(&seq->pend_q_lock, flags); | ||
742 | out: | ||
743 | ascb->tasklet_complete(ascb, dl); | ||
744 | |||
745 | next_1: | ||
746 | seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1); | ||
747 | if (!seq->dl_next) | ||
748 | seq->dl_toggle ^= DL_TOGGLE_MASK; | ||
749 | } | ||
750 | } | ||
751 | |||
752 | /* ---------- Interrupt Service Routines ---------- */ | ||
753 | |||
754 | /** | ||
755 | * asd_process_donelist_isr -- schedule processing of done list entries | ||
756 | * @asd_ha: pointer to host adapter structure | ||
757 | */ | ||
758 | static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) | ||
759 | { | ||
760 | tasklet_schedule(&asd_ha->seq.dl_tasklet); | ||
761 | } | ||
762 | |||
763 | /** | ||
764 | * asd_com_sas_isr -- process device communication interrupt (COMINT) | ||
765 | * @asd_ha: pointer to host adapter structure | ||
766 | */ | ||
767 | static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha) | ||
768 | { | ||
769 | u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT); | ||
770 | |||
771 | /* clear COMSTAT int */ | ||
772 | asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF); | ||
773 | |||
774 | if (comstat & CSBUFPERR) { | ||
775 | asd_printk("%s: command/status buffer dma parity error\n", | ||
776 | pci_name(asd_ha->pcidev)); | ||
777 | } else if (comstat & CSERR) { | ||
778 | int i; | ||
779 | u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); | ||
780 | dmaerr &= 0xFF; | ||
781 | asd_printk("%s: command/status dma error, DMAERR: 0x%02x, " | ||
782 | "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n", | ||
783 | pci_name(asd_ha->pcidev), | ||
784 | dmaerr, | ||
785 | asd_read_reg_dword(asd_ha, CSDMAADR), | ||
786 | asd_read_reg_dword(asd_ha, CSDMAADR+4)); | ||
787 | asd_printk("CSBUFFER:\n"); | ||
788 | for (i = 0; i < 8; i++) { | ||
789 | asd_printk("%08x %08x %08x %08x\n", | ||
790 | asd_read_reg_dword(asd_ha, CSBUFFER), | ||
791 | asd_read_reg_dword(asd_ha, CSBUFFER+4), | ||
792 | asd_read_reg_dword(asd_ha, CSBUFFER+8), | ||
793 | asd_read_reg_dword(asd_ha, CSBUFFER+12)); | ||
794 | } | ||
795 | asd_dump_seq_state(asd_ha, 0); | ||
796 | } else if (comstat & OVLYERR) { | ||
797 | u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); | ||
798 | dmaerr = (dmaerr >> 8) & 0xFF; | ||
799 | asd_printk("%s: overlay dma error:0x%x\n", | ||
800 | pci_name(asd_ha->pcidev), | ||
801 | dmaerr); | ||
802 | } | ||
803 | asd_chip_reset(asd_ha); | ||
804 | } | ||
805 | |||
806 | static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) | ||
807 | { | ||
808 | static const char *halt_code[256] = { | ||
809 | "UNEXPECTED_INTERRUPT0", | ||
810 | "UNEXPECTED_INTERRUPT1", | ||
811 | "UNEXPECTED_INTERRUPT2", | ||
812 | "UNEXPECTED_INTERRUPT3", | ||
813 | "UNEXPECTED_INTERRUPT4", | ||
814 | "UNEXPECTED_INTERRUPT5", | ||
815 | "UNEXPECTED_INTERRUPT6", | ||
816 | "UNEXPECTED_INTERRUPT7", | ||
817 | "UNEXPECTED_INTERRUPT8", | ||
818 | "UNEXPECTED_INTERRUPT9", | ||
819 | "UNEXPECTED_INTERRUPT10", | ||
820 | [11 ... 19] = "unknown[11,19]", | ||
821 | "NO_FREE_SCB_AVAILABLE", | ||
822 | "INVALID_SCB_OPCODE", | ||
823 | "INVALID_MBX_OPCODE", | ||
824 | "INVALID_ATA_STATE", | ||
825 | "ATA_QUEUE_FULL", | ||
826 | "ATA_TAG_TABLE_FAULT", | ||
827 | "ATA_TAG_MASK_FAULT", | ||
828 | "BAD_LINK_QUEUE_STATE", | ||
829 | "DMA2CHIM_QUEUE_ERROR", | ||
830 | "EMPTY_SCB_LIST_FULL", | ||
831 | "unknown[30]", | ||
832 | "IN_USE_SCB_ON_FREE_LIST", | ||
833 | "BAD_OPEN_WAIT_STATE", | ||
834 | "INVALID_STP_AFFILIATION", | ||
835 | "unknown[34]", | ||
836 | "EXEC_QUEUE_ERROR", | ||
837 | "TOO_MANY_EMPTIES_NEEDED", | ||
838 | "EMPTY_REQ_QUEUE_ERROR", | ||
839 | "Q_MONIRTT_MGMT_ERROR", | ||
840 | "TARGET_MODE_FLOW_ERROR", | ||
841 | "DEVICE_QUEUE_NOT_FOUND", | ||
842 | "START_IRTT_TIMER_ERROR", | ||
843 | "ABORT_TASK_ILLEGAL_REQ", | ||
844 | [43 ... 255] = "unknown[43,255]" | ||
845 | }; | ||
846 | |||
847 | if (dchstatus & CSEQINT) { | ||
848 | u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT); | ||
849 | |||
850 | if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) { | ||
851 | asd_printk("%s: CSEQ arp2int:0x%x\n", | ||
852 | pci_name(asd_ha->pcidev), | ||
853 | arp2int); | ||
854 | } else if (arp2int & ARP2HALTC) | ||
855 | asd_printk("%s: CSEQ halted: %s\n", | ||
856 | pci_name(asd_ha->pcidev), | ||
857 | halt_code[(arp2int>>16)&0xFF]); | ||
858 | else | ||
859 | asd_printk("%s: CARP2INT:0x%x\n", | ||
860 | pci_name(asd_ha->pcidev), | ||
861 | arp2int); | ||
862 | } | ||
863 | if (dchstatus & LSEQINT_MASK) { | ||
864 | int lseq; | ||
865 | u8 lseq_mask = dchstatus & LSEQINT_MASK; | ||
866 | |||
867 | for_each_sequencer(lseq_mask, lseq_mask, lseq) { | ||
868 | u32 arp2int = asd_read_reg_dword(asd_ha, | ||
869 | LmARP2INT(lseq)); | ||
870 | if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR | ||
871 | | ARP2CIOPERR)) { | ||
872 | asd_printk("%s: LSEQ%d arp2int:0x%x\n", | ||
873 | pci_name(asd_ha->pcidev), | ||
874 | lseq, arp2int); | ||
875 | /* XXX we should only do lseq reset */ | ||
876 | } else if (arp2int & ARP2HALTC) | ||
877 | asd_printk("%s: LSEQ%d halted: %s\n", | ||
878 | pci_name(asd_ha->pcidev), | ||
879 | lseq, halt_code[(arp2int>>16)&0xFF]); | ||
880 | else | ||
881 | asd_printk("%s: LSEQ%d ARP2INT:0x%x\n", | ||
882 | pci_name(asd_ha->pcidev), lseq, | ||
883 | arp2int); | ||
884 | } | ||
885 | } | ||
886 | asd_chip_reset(asd_ha); | ||
887 | } | ||
888 | |||
889 | /** | ||
890 | * asd_dch_sas_isr -- process device channel interrupt (DEVINT) | ||
891 | * @asd_ha: pointer to host adapter structure | ||
892 | */ | ||
893 | static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha) | ||
894 | { | ||
895 | u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS); | ||
896 | |||
897 | if (dchstatus & CFIFTOERR) { | ||
898 | asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev)); | ||
899 | asd_chip_reset(asd_ha); | ||
900 | } else | ||
901 | asd_arp2_err(asd_ha, dchstatus); | ||
902 | } | ||
903 | |||
904 | /** | ||
905 | * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR) | ||
906 | * @asd_ha: pointer to host adapter structure | ||
907 | */ | ||
908 | static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha) | ||
909 | { | ||
910 | u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R); | ||
911 | |||
912 | if (!(stat0r & ASIERR)) { | ||
913 | asd_printk("hmm, EXSI interrupted but no error?\n"); | ||
914 | return; | ||
915 | } | ||
916 | |||
917 | if (stat0r & ASIFMTERR) { | ||
918 | asd_printk("ASI SEEPROM format error for %s\n", | ||
919 | pci_name(asd_ha->pcidev)); | ||
920 | } else if (stat0r & ASISEECHKERR) { | ||
921 | u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R); | ||
922 | asd_printk("ASI SEEPROM checksum 0x%x error for %s\n", | ||
923 | stat1r & CHECKSUM_MASK, | ||
924 | pci_name(asd_ha->pcidev)); | ||
925 | } else { | ||
926 | u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR); | ||
927 | |||
928 | if (!(statr & CPI2ASIMSTERR_MASK)) { | ||
929 | ASD_DPRINTK("hmm, ASIERR?\n"); | ||
930 | return; | ||
931 | } else { | ||
932 | u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR); | ||
933 | u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR); | ||
934 | |||
935 | asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, " | ||
936 | "count: 0x%x, byteen: 0x%x, targerr: 0x%x " | ||
937 | "master id: 0x%x, master err: 0x%x\n", | ||
938 | pci_name(asd_ha->pcidev), | ||
939 | addr, data, | ||
940 | (statr & CPI2ASIBYTECNT_MASK) >> 16, | ||
941 | (statr & CPI2ASIBYTEEN_MASK) >> 12, | ||
942 | (statr & CPI2ASITARGERR_MASK) >> 8, | ||
943 | (statr & CPI2ASITARGMID_MASK) >> 4, | ||
944 | (statr & CPI2ASIMSTERR_MASK)); | ||
945 | } | ||
946 | } | ||
947 | asd_chip_reset(asd_ha); | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * asd_hst_pcix_isr -- process host interface interrupts | ||
952 | * @asd_ha: pointer to host adapter structure | ||
953 | * | ||
954 | * Asserted on PCIX errors: target abort, etc. | ||
955 | */ | ||
956 | static inline void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha) | ||
957 | { | ||
958 | u16 status; | ||
959 | u32 pcix_status; | ||
960 | u32 ecc_status; | ||
961 | |||
962 | pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status); | ||
963 | pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status); | ||
964 | pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status); | ||
965 | |||
966 | if (status & PCI_STATUS_DETECTED_PARITY) | ||
967 | asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev)); | ||
968 | else if (status & PCI_STATUS_REC_MASTER_ABORT) | ||
969 | asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev)); | ||
970 | else if (status & PCI_STATUS_REC_TARGET_ABORT) | ||
971 | asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev)); | ||
972 | else if (status & PCI_STATUS_PARITY) | ||
973 | asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev)); | ||
974 | else if (pcix_status & RCV_SCE) { | ||
975 | asd_printk("received split completion error for %s\n", | ||
976 | pci_name(asd_ha->pcidev)); | ||
977 | pci_write_config_dword(asd_ha->pcidev, PCIX_STATUS, pcix_status); | ||
978 | /* XXX: Abort task? */ | ||
979 | return; | ||
980 | } else if (pcix_status & UNEXP_SC) { | ||
981 | asd_printk("unexpected split completion for %s\n", | ||
982 | pci_name(asd_ha->pcidev)); | ||
983 | pci_write_config_dword(asd_ha->pcidev, PCIX_STATUS, pcix_status); | ||
984 | /* ignore */ | ||
985 | return; | ||
986 | } else if (pcix_status & SC_DISCARD) | ||
987 | asd_printk("split completion discarded for %s\n", | ||
988 | pci_name(asd_ha->pcidev)); | ||
989 | else if (ecc_status & UNCOR_ECCERR) | ||
990 | asd_printk("uncorrectable ECC error for %s\n", | ||
991 | pci_name(asd_ha->pcidev)); | ||
992 | asd_chip_reset(asd_ha); | ||
993 | } | ||
994 | |||
995 | /** | ||
996 | * asd_hw_isr -- host adapter interrupt service routine | ||
997 | * @irq: ignored | ||
998 | * @dev_id: pointer to host adapter structure | ||
999 | * @regs: ignored | ||
1000 | * | ||
1001 | * The ISR processes done list entries and performs level 3 error handling. | ||
1002 | */ | ||
1003 | irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs) | ||
1004 | { | ||
1005 | struct asd_ha_struct *asd_ha = dev_id; | ||
1006 | u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT); | ||
1007 | |||
1008 | if (!chimint) | ||
1009 | return IRQ_NONE; | ||
1010 | |||
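/* Acknowledge the interrupt causes by writing them back to CHIMINT,
 * and read the register again to flush the posted write.
 */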
1011 | asd_write_reg_dword(asd_ha, CHIMINT, chimint); | ||
1012 | (void) asd_read_reg_dword(asd_ha, CHIMINT); | ||
1013 | |||
1014 | if (chimint & DLAVAIL) | ||
1015 | asd_process_donelist_isr(asd_ha); | ||
1016 | if (chimint & COMINT) | ||
1017 | asd_com_sas_isr(asd_ha); | ||
1018 | if (chimint & DEVINT) | ||
1019 | asd_dch_sas_isr(asd_ha); | ||
1020 | if (chimint & INITERR) | ||
1021 | asd_rbi_exsi_isr(asd_ha); | ||
1022 | if (chimint & HOSTERR) | ||
1023 | asd_hst_pcix_isr(asd_ha); | ||
1024 | |||
1025 | return IRQ_HANDLED; | ||
1026 | } | ||
1027 | |||
1028 | /* ---------- SCB handling ---------- */ | ||
1029 | |||
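/* Allocate a single aSCB: the bookkeeping structure comes from the
 * asd_ascb_cache slab, the hardware SCB from the scb_pool DMA pool,
 * and a free task context index is reserved for it.
 */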
1030 | static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, | ||
1031 | unsigned int gfp_flags) | ||
1032 | { | ||
1033 | extern kmem_cache_t *asd_ascb_cache; | ||
1034 | struct asd_seq_data *seq = &asd_ha->seq; | ||
1035 | struct asd_ascb *ascb; | ||
1036 | unsigned long flags; | ||
1037 | |||
1038 | ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags); | ||
1039 | |||
1040 | if (ascb) { | ||
1041 | memset(ascb, 0, sizeof(*ascb)); | ||
1042 | ascb->dma_scb.size = sizeof(struct scb); | ||
1043 | ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, | ||
1044 | gfp_flags, | ||
1045 | &ascb->dma_scb.dma_handle); | ||
1046 | if (!ascb->dma_scb.vaddr) { | ||
1047 | kmem_cache_free(asd_ascb_cache, ascb); | ||
1048 | return NULL; | ||
1049 | } | ||
1050 | memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb)); | ||
1051 | asd_init_ascb(asd_ha, ascb); | ||
1052 | |||
1053 | spin_lock_irqsave(&seq->tc_index_lock, flags); | ||
1054 | ascb->tc_index = asd_tc_index_get(seq, ascb); | ||
1055 | spin_unlock_irqrestore(&seq->tc_index_lock, flags); | ||
1056 | if (ascb->tc_index == -1) | ||
1057 | goto undo; | ||
1058 | |||
1059 | ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index); | ||
1060 | } | ||
1061 | |||
1062 | return ascb; | ||
1063 | undo: | ||
1064 | dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, | ||
1065 | ascb->dma_scb.dma_handle); | ||
1066 | kmem_cache_free(asd_ascb_cache, ascb); | ||
1067 | ASD_DPRINTK("no index for ascb\n"); | ||
1068 | return NULL; | ||
1069 | } | ||
1070 | |||
1071 | /** | ||
1072 | * asd_ascb_alloc_list -- allocate a list of aSCBs | ||
1073 | * @asd_ha: pointer to host adapter structure | ||
1074 | * @num: pointer to integer number of aSCBs | ||
1075 | * @gfp_flags: GFP_ flags. | ||
1076 | * | ||
1077 | * This is the only function which is used to allocate aSCBs. | ||
1078 | * It can allocate one or many. If more than one, then they form | ||
1079 | * a linked list in two ways: by their list field of the ascb struct | ||
1080 | * and by the next_scb field of the scb_header. | ||
1081 | * | ||
1082 | * Returns NULL if no memory was available, else pointer to a list | ||
1083 | * of ascbs. On return, @num is the number of SCBs which could not | ||
1084 | * be allocated, or 0 if all of the requested SCBs were | ||
1085 | * allocated. | ||
1086 | */ | ||
1087 | struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct | ||
1088 | *asd_ha, int *num, | ||
1089 | unsigned int gfp_flags) | ||
1090 | { | ||
1091 | struct asd_ascb *first = NULL; | ||
1092 | |||
1093 | for ( ; *num > 0; --*num) { | ||
1094 | struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags); | ||
1095 | |||
1096 | if (!ascb) | ||
1097 | break; | ||
1098 | else if (!first) | ||
1099 | first = ascb; | ||
1100 | else { | ||
1101 | struct asd_ascb *last = list_entry(first->list.prev, | ||
1102 | struct asd_ascb, | ||
1103 | list); | ||
1104 | list_add_tail(&ascb->list, &first->list); | ||
1105 | last->scb->header.next_scb = | ||
1106 | cpu_to_le64(((u64)ascb->dma_scb.dma_handle)); | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1110 | return first; | ||
1111 | } | ||
1112 | |||
1113 | /** | ||
1114 | * asd_swap_head_scb -- swap the head scb | ||
1115 | * @asd_ha: pointer to host adapter structure | ||
1116 | * @ascb: pointer to the head of an ascb list | ||
1117 | * | ||
1118 | * The sequencer knows the DMA address of the next SCB to be DMAed to | ||
1119 | * the host adapter, from initialization or from the last list DMAed. | ||
1120 | * seq->next_scb keeps the address of this SCB. The sequencer will | ||
1121 | * DMA to the host adapter this list of SCBs. But the head (first | ||
1122 | * element) of this list is not known to the sequencer. Here we swap | ||
1123 | * the head of the list with the known SCB (memcpy()). | ||
1124 | * Only one memcpy() is required per list, so it is in our interest | ||
1125 | * to keep each list of SCBs as long as possible, so that the ratio | ||
1126 | * of memcpy() calls to the number of SCBs DMA-ed is as small | ||
1127 | * as possible. | ||
1128 | * | ||
1129 | * LOCKING: called with the pending list lock held. | ||
1130 | */ | ||
1131 | static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha, | ||
1132 | struct asd_ascb *ascb) | ||
1133 | { | ||
1134 | struct asd_seq_data *seq = &asd_ha->seq; | ||
1135 | struct asd_ascb *last = list_entry(ascb->list.prev, | ||
1136 | struct asd_ascb, | ||
1137 | list); | ||
1138 | struct asd_dma_tok t = ascb->dma_scb; | ||
1139 | |||
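/* Swap buffers so that the list head ends up in the SCB the sequencer
 * already knows about; the head's old buffer becomes next_scb for the
 * following post.
 */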
1140 | memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb)); | ||
1141 | ascb->dma_scb = seq->next_scb; | ||
1142 | ascb->scb = ascb->dma_scb.vaddr; | ||
1143 | seq->next_scb = t; | ||
1144 | last->scb->header.next_scb = | ||
1145 | cpu_to_le64(((u64)seq->next_scb.dma_handle)); | ||
1146 | } | ||
1147 | |||
1148 | /** | ||
1149 | * asd_start_scb_timers -- (add and) start timers of SCBs | ||
1150 | * @list: pointer to struct list_head of the scbs | ||
1151 | * @to: timeout in jiffies | ||
1152 | * | ||
1153 | * If an SCB in the @list has no timer function, assign the default | ||
1154 | * one, then start the timer of the SCB. This function is | ||
1155 | * intended to be called from asd_post_ascb_list(), just prior to | ||
1156 | * posting the SCBs to the sequencer. | ||
1157 | */ | ||
1158 | static inline void asd_start_scb_timers(struct list_head *list) | ||
1159 | { | ||
1160 | struct asd_ascb *ascb; | ||
1161 | list_for_each_entry(ascb, list, list) { | ||
1162 | if (!ascb->uldd_timer) { | ||
1163 | ascb->timer.data = (unsigned long) ascb; | ||
1164 | ascb->timer.function = asd_ascb_timedout; | ||
1165 | ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; | ||
1166 | add_timer(&ascb->timer); | ||
1167 | } | ||
1168 | } | ||
1169 | } | ||
1170 | |||
1171 | /** | ||
1172 | * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter | ||
1173 | * @asd_ha: pointer to a host adapter structure | ||
1174 | * @ascb: pointer to the first aSCB in the list | ||
1175 | * @num: number of aSCBs in the list (to be posted) | ||
1176 | * | ||
1177 | * See queueing comment in asd_post_escb_list(). | ||
1178 | * | ||
1179 | * Additional note on queuing: In order to minimize the ratio of memcpy() | ||
1180 | * to the number of ascbs sent, we try to batch-send as many ascbs as possible | ||
1181 | * in one go. | ||
1182 | * Two cases are possible: | ||
1183 | * A) can_queue >= num, | ||
1184 | * B) can_queue < num. | ||
1185 | * Case A: we can send the whole batch at once. "pending" is | ||
1186 | * incremented at the beginning of this function, under the same lock | ||
1187 | * where it is checked, to eliminate races between concurrent callers. | ||
1188 | * Case B: should never happen if the managing layer considers | ||
1189 | * lldd_queue_size. | ||
1190 | */ | ||
1191 | int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, | ||
1192 | int num) | ||
1193 | { | ||
1194 | unsigned long flags; | ||
1195 | LIST_HEAD(list); | ||
1196 | int can_queue; | ||
1197 | |||
1198 | spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); | ||
1199 | can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending; | ||
1200 | if (can_queue >= num) | ||
1201 | asd_ha->seq.pending += num; | ||
1202 | else | ||
1203 | can_queue = 0; | ||
1204 | |||
1205 | if (!can_queue) { | ||
1206 | spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); | ||
1207 | asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev)); | ||
1208 | return -SAS_QUEUE_FULL; | ||
1209 | } | ||
1210 | |||
1211 | asd_swap_head_scb(asd_ha, ascb); | ||
1212 | |||
1213 | __list_add(&list, ascb->list.prev, &ascb->list); | ||
1214 | |||
1215 | asd_start_scb_timers(&list); | ||
1216 | |||
1217 | asd_ha->seq.scbpro += num; | ||
1218 | list_splice_init(&list, asd_ha->seq.pend_q.prev); | ||
1219 | asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); | ||
1220 | spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); | ||
1221 | |||
1222 | return 0; | ||
1223 | } | ||
1224 | |||
1225 | /** | ||
1226 | * asd_post_escb_list -- post a list of 1 or more empty scb | ||
1227 | * @asd_ha: pointer to a host adapter structure | ||
1228 | * @ascb: pointer to the first empty SCB in the list | ||
1229 | * @num: number of aSCBs in the list (to be posted) | ||
1230 | * | ||
1231 | * This is essentially the same as asd_post_ascb_list, but we do not | ||
1232 | * increment pending, add those to the pending list or get indexes. | ||
1233 | * See asd_init_escbs() and asd_init_post_escbs(). | ||
1234 | * | ||
1235 | * Since sending a list of ascbs is a superset of sending a single | ||
1236 | * ascb, this function exists to generalize this. More specifically, | ||
1237 | * when sending a list of those, we want to do only a _single_ | ||
1238 | * memcpy() at swap head, as opposed to for each ascb sent (in the | ||
1239 | * case of sending them one by one). That is, we want to minimize the | ||
1240 | * ratio of memcpy() operations to the number of ascbs sent. The same | ||
1241 | * logic applies to asd_post_ascb_list(). | ||
1242 | */ | ||
1243 | int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, | ||
1244 | int num) | ||
1245 | { | ||
1246 | unsigned long flags; | ||
1247 | |||
1248 | spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); | ||
1249 | asd_swap_head_scb(asd_ha, ascb); | ||
1250 | asd_ha->seq.scbpro += num; | ||
1251 | asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); | ||
1252 | spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); | ||
1253 | |||
1254 | return 0; | ||
1255 | } | ||
1256 | |||
1257 | /* ---------- LED ---------- */ | ||
1258 | |||
1259 | /** | ||
1260 | * asd_turn_led -- turn on/off an LED | ||
1261 | * @asd_ha: pointer to host adapter structure | ||
1262 | * @phy_id: the PHY id whose LED we want to manipulate | ||
1263 | * @op: 1 to turn on, 0 to turn off | ||
1264 | */ | ||
1265 | void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op) | ||
1266 | { | ||
1267 | if (phy_id < ASD_MAX_PHYS) { | ||
1268 | u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id)); | ||
1269 | if (op) | ||
1270 | v |= LEDPOL; | ||
1271 | else | ||
1272 | v &= ~LEDPOL; | ||
1273 | asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v); | ||
1274 | } | ||
1275 | } | ||
1276 | |||
1277 | /** | ||
1278 | * asd_control_led -- enable/disable an LED on the board | ||
1279 | * @asd_ha: pointer to host adapter structure | ||
1280 | * @phy_id: integer, the phy id | ||
1281 | * @op: integer, 1 to enable, 0 to disable the LED | ||
1282 | * | ||
1283 | * First we output enable the LED, then we set the source | ||
1284 | * to be an external module. | ||
1285 | */ | ||
1286 | void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op) | ||
1287 | { | ||
1288 | if (phy_id < ASD_MAX_PHYS) { | ||
1289 | u32 v; | ||
1290 | |||
1291 | v = asd_read_reg_dword(asd_ha, GPIOOER); | ||
1292 | if (op) | ||
1293 | v |= (1 << phy_id); | ||
1294 | else | ||
1295 | v &= ~(1 << phy_id); | ||
1296 | asd_write_reg_dword(asd_ha, GPIOOER, v); | ||
1297 | |||
1298 | v = asd_read_reg_dword(asd_ha, GPIOCNFGR); | ||
1299 | if (op) | ||
1300 | v |= (1 << phy_id); | ||
1301 | else | ||
1302 | v &= ~(1 << phy_id); | ||
1303 | asd_write_reg_dword(asd_ha, GPIOCNFGR, v); | ||
1304 | } | ||
1305 | } | ||
1306 | |||
1307 | /* ---------- PHY enable ---------- */ | ||
1308 | |||
1309 | static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id) | ||
1310 | { | ||
1311 | struct asd_phy *phy = &asd_ha->phys[phy_id]; | ||
1312 | |||
1313 | asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0); | ||
1314 | asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY), | ||
1315 | HOTPLUG_DELAY_TIMEOUT); | ||
1316 | |||
1317 | /* Get defaults from manuf. sector */ | ||
1318 | /* XXX we need defaults for those in case MS is broken. */ | ||
1319 | asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0), | ||
1320 | phy->phy_desc->phy_control_0); | ||
1321 | asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1), | ||
1322 | phy->phy_desc->phy_control_1); | ||
1323 | asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2), | ||
1324 | phy->phy_desc->phy_control_2); | ||
1325 | asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3), | ||
1326 | phy->phy_desc->phy_control_3); | ||
1327 | |||
1328 | asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id), | ||
1329 | ASD_COMINIT_TIMEOUT); | ||
1330 | |||
1331 | asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id), | ||
1332 | phy->id_frm_tok->dma_handle); | ||
1333 | |||
1334 | asd_control_led(asd_ha, phy_id, 1); | ||
1335 | |||
1336 | return 0; | ||
1337 | } | ||
1338 | |||
1339 | int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask) | ||
1340 | { | ||
1341 | u8 phy_m; | ||
1342 | u8 i; | ||
1343 | int num = 0, k; | ||
1344 | struct asd_ascb *ascb; | ||
1345 | struct asd_ascb *ascb_list; | ||
1346 | |||
1347 | if (!phy_mask) { | ||
1348 | asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__); | ||
1349 | return 0; | ||
1350 | } | ||
1351 | |||
1352 | for_each_phy(phy_mask, phy_m, i) { | ||
1353 | num++; | ||
1354 | asd_enable_phy(asd_ha, i); | ||
1355 | } | ||
1356 | |||
1357 | k = num; | ||
1358 | ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL); | ||
1359 | if (!ascb_list) { | ||
1360 | asd_printk("no memory for control phy ascb list\n"); | ||
1361 | return -ENOMEM; | ||
1362 | } | ||
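/* asd_ascb_alloc_list() leaves in k the number of SCBs it could not
 * allocate, so after the subtraction num is the number actually
 * obtained.
 */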
1363 | num -= k; | ||
1364 | |||
1365 | ascb = ascb_list; | ||
1366 | for_each_phy(phy_mask, phy_m, i) { | ||
1367 | asd_build_control_phy(ascb, i, ENABLE_PHY); | ||
1368 | ascb = list_entry(ascb->list.next, struct asd_ascb, list); | ||
1369 | } | ||
1370 | ASD_DPRINTK("posting %d control phy scbs\n", num); | ||
1371 | k = asd_post_ascb_list(asd_ha, ascb_list, num); | ||
1372 | if (k) | ||
1373 | asd_ascb_free_list(ascb_list); | ||
1374 | |||
1375 | return k; | ||
1376 | } | ||