diff options
Diffstat (limited to 'drivers/scsi/aacraid/dpcsup.c')
-rw-r--r-- | drivers/scsi/aacraid/dpcsup.c | 115 |
1 file changed, 114 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index be2e98de9fab..439948ef8251 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c | |||
@@ -73,7 +73,7 @@ unsigned int aac_response_normal(struct aac_queue * q) | |||
73 | int fast; | 73 | int fast; |
74 | u32 index = le32_to_cpu(entry->addr); | 74 | u32 index = le32_to_cpu(entry->addr); |
75 | fast = index & 0x01; | 75 | fast = index & 0x01; |
76 | fib = &dev->fibs[index >> 1]; | 76 | fib = &dev->fibs[index >> 2]; |
77 | hwfib = fib->hw_fib; | 77 | hwfib = fib->hw_fib; |
78 | 78 | ||
79 | aac_consumer_free(dev, q, HostNormRespQueue); | 79 | aac_consumer_free(dev, q, HostNormRespQueue); |
@@ -213,3 +213,116 @@ unsigned int aac_command_normal(struct aac_queue *q) | |||
213 | spin_unlock_irqrestore(q->lock, flags); | 213 | spin_unlock_irqrestore(q->lock, flags); |
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | |||
217 | |||
/**
 *	aac_intr_normal	-	Handle command replies
 *	@dev: Device
 *	@index: completion reference
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all QE there are and wake up all the waiters before exiting.
 *
 *	Returns 1 when the entry was consumed here (AIF path or allocation
 *	failure), 0 when the completion was handed back to the waiter/callback.
 */

unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
{
	u32 index = le32_to_cpu(Index);

	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, Index));
	/* Bit 1 set marks an adapter-initiated FIB (AIF) rather than a
	 * response to one of our outstanding commands. */
	if ((index & 0x00000002L)) {
		struct hw_fib * hw_fib;
		struct fib * fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;

		if (index == 0xFFFFFFFEL) /* Special Case */
			return 0; /* Do nothing */
		/*
		 * Allocate a FIB. For non queued stuff we can just use
		 * the stack so we are happy. We need a fib object in order to
		 * manage the linked lists.
		 */
		/* GFP_ATOMIC: we are in interrupt context and must not sleep.
		 * If there is no aif_thread to consume the entry, or the fib
		 * allocation fails, drop the AIF and report it handled (1). */
		if ((!dev->aif_thread)
		 || (!(fib = kmalloc(sizeof(struct fib),GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kmalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
			kfree (fib);
			return 1;
		}
		/* NOTE(review): this memset is immediately overwritten in full
		 * by the memcpy below, so it looks redundant — confirm before
		 * removing. */
		memset(hw_fib, 0, sizeof(struct hw_fib));
		/* Copy the adapter's FIB out of shared memory: index (with the
		 * AIF flag bit masked off) is the byte offset from regs.sa. */
		memcpy(hw_fib, (struct hw_fib *)(((unsigned long)(dev->regs.sa)) + (index & ~0x00000002L)), sizeof(struct hw_fib));
		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		/* Queue the copied FIB for the aif_thread and wake it. */
		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else {
		/* Command completion: bit 0 flags a "fast" (implicit ST_OK)
		 * response; the remaining bits (>> 2) index dev->fibs. */
		int fast = index & 0x01;
		struct fib * fib = &dev->fibs[index >> 2];
		struct hw_fib * hwfib = fib->hw_fib;

		/*
		 * Remove this fib from the Outstanding I/O queue.
		 * But only if it has not already been timed out.
		 *
		 * If the fib has been timed out already, then just
		 * continue. The caller has already been notified that
		 * the fib timed out.
		 */
		if ((fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
			printk(KERN_DEBUG"aacraid: hwfib=%p index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib);
			return 0;
		}

		list_del(&fib->queue);
		dev->queues->queue[AdapNormCmdQueue].numpending--;

		if (fast) {
			/*
			 * Doctor the fib
			 */
			/* Fast responses carry no status; synthesize ST_OK and
			 * mark the transfer as processed by the adapter. */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		/* NuFileSystem responses: squash a non-zero high-half status
		 * word down to plain ST_OK. */
		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			u32 *pstatus = (u32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 * NOTE: we cannot touch the fib after this
			 * call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			/* Synchronous completion: mark done and wake the
			 * sleeper under the fib's event lock. */
			unsigned long flagv;
			dprintk((KERN_INFO "event_wait up\n"));
			spin_lock_irqsave(&fib->event_lock, flagv);
			fib->done = 1;
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
		}
		return 0;
	}
}