Diffstat (limited to 'drivers/scsi/aacraid/commsup.c')
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 201
1 file changed, 100 insertions(+), 101 deletions(-)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6d88f30296e1..1dd2e57c3345 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -56,7 +56,7 @@
 * Allocate and map the shared PCI space for the FIB blocks used to
 * talk to the Adaptec firmware.
 */
 
 static int fib_map_alloc(struct aac_dev *dev)
 {
 dprintk((KERN_INFO
@@ -109,14 +109,16 @@ int aac_fib_setup(struct aac_dev * dev)
 }
 if (i<0)
 return -ENOMEM;
 
 hw_fib = dev->hw_fib_va;
 hw_fib_pa = dev->hw_fib_pa;
 memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
 /*
 * Initialise the fibs
 */
-for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
+for (i = 0, fibptr = &dev->fibs[i];
+     i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+     i++, fibptr++)
 {
 fibptr->dev = dev;
 fibptr->hw_fib_va = hw_fib;
@@ -148,13 +150,13 @@ int aac_fib_setup(struct aac_dev * dev)
 * Allocate a fib from the adapter fib pool. If the pool is empty we
 * return NULL.
 */
 
 struct fib *aac_fib_alloc(struct aac_dev *dev)
 {
 struct fib * fibptr;
 unsigned long flags;
 spin_lock_irqsave(&dev->fib_lock, flags);
 fibptr = dev->free_fib;
 if(!fibptr){
 spin_unlock_irqrestore(&dev->fib_lock, flags);
 return fibptr;
@@ -184,7 +186,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
 *
 * Frees up a fib and places it on the appropriate queue
 */
 
 void aac_fib_free(struct fib *fibptr)
 {
 unsigned long flags;
@@ -205,10 +207,10 @@ void aac_fib_free(struct fib *fibptr)
 /**
 * aac_fib_init - initialise a fib
 * @fibptr: The fib to initialize
 *
 * Set up the generic fib fields ready for use
 */
 
 void aac_fib_init(struct fib *fibptr)
 {
 struct hw_fib *hw_fib = fibptr->hw_fib_va;
@@ -228,12 +230,12 @@ void aac_fib_init(struct fib *fibptr)
 * Will deallocate and return to the free pool the FIB pointed to by the
 * caller.
 */
 
 static void fib_dealloc(struct fib * fibptr)
 {
 struct hw_fib *hw_fib = fibptr->hw_fib_va;
 BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
 hw_fib->header.XferState = 0;
 }
 
 /*
@@ -242,7 +244,7 @@ static void fib_dealloc(struct fib * fibptr)
 * these routines and are the only routines which have a knowledge of the
 * how these queues are implemented.
 */
 
 /**
 * aac_get_entry - get a queue entry
 * @dev: Adapter
@@ -255,7 +257,7 @@ static void fib_dealloc(struct fib * fibptr)
 * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
 * returned.
 */
 
 static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
 {
 struct aac_queue * q;
@@ -280,26 +282,27 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
 idx = ADAP_NORM_RESP_ENTRIES;
 }
 if (idx != le32_to_cpu(*(q->headers.consumer)))
 *nonotify = 1;
 }
 
 if (qid == AdapNormCmdQueue) {
 if (*index >= ADAP_NORM_CMD_ENTRIES)
 *index = 0; /* Wrap to front of the Producer Queue. */
 } else {
 if (*index >= ADAP_NORM_RESP_ENTRIES)
 *index = 0; /* Wrap to front of the Producer Queue. */
 }
 
-if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
+/* Queue is full */
+if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
 printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
 qid, q->numpending);
 return 0;
 } else {
 *entry = q->base + *index;
 return 1;
 }
 }
 
 /**
 * aac_queue_get - get the next free QE
@@ -321,31 +324,29 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
 {
 struct aac_entry * entry = NULL;
 int map = 0;
 
 if (qid == AdapNormCmdQueue) {
 /* if no entries wait for some if caller wants to */
-while (!aac_get_entry(dev, qid, &entry, index, nonotify))
-{
+while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
 printk(KERN_ERR "GetEntries failed\n");
 }
 /*
 * Setup queue entry with a command, status and fib mapped
 */
 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
 map = 1;
 } else {
-while(!aac_get_entry(dev, qid, &entry, index, nonotify))
-{
+while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
 /* if no entries wait for some if caller wants to */
 }
 /*
 * Setup queue entry with command, status and fib mapped
 */
 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
 entry->addr = hw_fib->header.SenderFibAddress;
 /* Restore adapters pointer to the FIB */
 hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
 map = 0;
 }
 /*
 * If MapFib is true than we need to map the Fib and put pointers
@@ -357,8 +358,8 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
 }
 
 /*
 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS commuication. These
 * routines have no knowledge of the commuication method used. This level
 * sends and receives FIBs. This level has no knowledge of how these FIBs
 * get passed back and forth.
@@ -380,7 +381,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
 * an event to wait on must be supplied. This event will be set when a
 * response FIB is received from the adapter.
 */
 
 int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 int priority, int wait, int reply, fib_callback callback,
 void *callback_data)
@@ -393,13 +394,13 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
 return -EBUSY;
 /*
 * There are 5 cases with the wait and reponse requested flags.
 * The only invalid cases are if the caller requests to wait and
 * does not request a response and if the caller does not want a
 * response and the Fib is not allocated from pool. If a response
 * is not requesed the Fib will just be deallocaed by the DPC
 * routine when the response comes back from the adapter. No
 * further processing will be done besides deleting the Fib. We
 * will have a debug mode where the adapter can notify the host
 * it had a problem and the host can log that fact.
 */
@@ -415,7 +416,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 } else if (wait && reply) {
 hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
 FIB_COUNTER_INCREMENT(aac_config.NormalSent);
 }
 /*
 * Map the fib into 32bits by using the fib number
 */
@@ -438,7 +439,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
 if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
 return -EMSGSIZE;
 }
 /*
 * Get a queue entry connect the FIB to it and send an notify
 * the adapter a command is ready.
@@ -475,9 +476,9 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 aac_adapter_deliver(fibptr);
 
 /*
 * If the caller wanted us to wait for response wait now.
 */
 
 if (wait) {
 spin_unlock_irqrestore(&fibptr->event_lock, flags);
 /* Only set for first known interruptable command */
@@ -524,7 +525,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 }
 spin_unlock_irqrestore(&fibptr->event_lock, flags);
 BUG_ON(fibptr->done == 0);
 
 if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
 return -ETIMEDOUT;
 return 0;
@@ -539,15 +540,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 return 0;
 }
 
 /**
 * aac_consumer_get - get the top of the queue
 * @dev: Adapter
 * @q: Queue
 * @entry: Return entry
 *
 * Will return a pointer to the entry on the top of the queue requested that
 * we are a consumer of, and return the address of the queue entry. It does
 * not change the state of the queue.
 */
 
 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
@@ -562,10 +563,10 @@ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entr
 * the end of the queue, else we just use the entry
 * pointed to by the header index
 */
 if (le32_to_cpu(*q->headers.consumer) >= q->entries)
 index = 0;
 else
 index = le32_to_cpu(*q->headers.consumer);
 *entry = q->base + index;
 status = 1;
 }
@@ -589,12 +590,12 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
 
 if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
 wasfull = 1;
 
 if (le32_to_cpu(*q->headers.consumer) >= q->entries)
 *q->headers.consumer = cpu_to_le32(1);
 else
 *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
 
 if (wasfull) {
 switch (qid) {
 
@@ -610,7 +611,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
 }
 aac_adapter_notify(dev, notify);
 }
 }
 
 /**
 * aac_fib_adapter_complete - complete adapter issued fib
@@ -632,32 +633,32 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 if (hw_fib->header.XferState == 0) {
 if (dev->comm_interface == AAC_COMM_MESSAGE)
 kfree (hw_fib);
 return 0;
 }
 /*
 * If we plan to do anything check the structure type first.
 */
-if ( hw_fib->header.StructType != FIB_MAGIC ) {
+if (hw_fib->header.StructType != FIB_MAGIC) {
 if (dev->comm_interface == AAC_COMM_MESSAGE)
 kfree (hw_fib);
 return -EINVAL;
 }
 /*
 * This block handles the case where the adapter had sent us a
 * command and we have finished processing the command. We
 * call completeFib when we are done processing the command
 * and want to send a response back to the adapter. This will
 * send the completed cdb to the adapter.
 */
 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
 if (dev->comm_interface == AAC_COMM_MESSAGE) {
 kfree (hw_fib);
 } else {
 u32 index;
 hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
 if (size) {
 size += sizeof(struct aac_fibhdr);
 if (size > le16_to_cpu(hw_fib->header.SenderSize))
 return -EMSGSIZE;
 hw_fib->header.Size = cpu_to_le16(size);
 }
@@ -669,12 +670,11 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 if (!(nointr & (int)aac_config.irq_mod))
 aac_adapter_notify(dev, AdapNormRespQueue);
 }
+} else {
+printk(KERN_WARNING "aac_fib_adapter_complete: "
+"Unknown xferstate detected.\n");
+BUG();
 }
-else
-{
-printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
-BUG();
-}
 return 0;
 }
 
@@ -684,7 +684,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
 *
 * Will do all necessary work to complete a FIB.
 */
 
 int aac_fib_complete(struct fib *fibptr)
 {
 struct hw_fib * hw_fib = fibptr->hw_fib_va;
@@ -694,15 +694,15 @@ int aac_fib_complete(struct fib *fibptr)
 */
 
 if (hw_fib->header.XferState == 0)
 return 0;
 /*
 * If we plan to do anything check the structure type first.
 */
 
 if (hw_fib->header.StructType != FIB_MAGIC)
 return -EINVAL;
 /*
 * This block completes a cdb which orginated on the host and we
 * just need to deallocate the cdb or reinit it. At this point the
 * command is complete that we had sent to the adapter and this
 * cdb could be reused.
@@ -723,7 +723,7 @@ int aac_fib_complete(struct fib *fibptr)
 fib_dealloc(fibptr);
 } else {
 BUG();
 }
 return 0;
 }
 
@@ -743,7 +743,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
 {
 int length = val & 0xffff;
 int level = (val >> 16) & 0xffff;
 
 /*
 * The size of the printfbuf is set in port.c
 * There is no variable or define for it
@@ -757,7 +757,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
 else
 printk(KERN_INFO "%s:%s", dev->name, cp);
 }
 memset(cp, 0, 256);
 }
 
 
@@ -816,9 +816,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
 */
 
 if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
 device = scsi_device_lookup(dev->scsi_host_ptr,
 CONTAINER_TO_CHANNEL(container),
 CONTAINER_TO_ID(container),
 CONTAINER_TO_LUN(container));
 if (device) {
 dev->fsa_dev[container].config_needed = CHANGE;
@@ -1184,13 +1184,13 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
 }
 (void)aac_get_adapter_info(aac);
 if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
 host->sg_tablesize = 34;
 host->max_sectors = (host->sg_tablesize * 8) + 112;
 }
 if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
 host->sg_tablesize = 17;
 host->max_sectors = (host->sg_tablesize * 8) + 112;
 }
 aac_get_config_status(aac, 1);
 aac_get_containers(aac);
 /*
@@ -1461,7 +1461,7 @@ out:
 * until the queue is empty. When the queue is empty it will wait for
 * more FIBs.
 */
 
 int aac_command_thread(void *data)
 {
 struct aac_dev *dev = data;
@@ -1487,30 +1487,29 @@ int aac_command_thread(void *data)
 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
 set_current_state(TASK_INTERRUPTIBLE);
 dprintk ((KERN_INFO "aac_command_thread start\n"));
-while(1)
-{
+while (1) {
 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
 struct list_head *entry;
 struct aac_aifcmd * aifcmd;
 
 set_current_state(TASK_RUNNING);
 
 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
 list_del(entry);
 
 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
 fib = list_entry(entry, struct fib, fiblink);
 /*
 * We will process the FIB here or pass it to a
 * worker thread that is TBD. We Really can't
 * do anything at this point since we don't have
 * anything defined for this thread to do.
 */
 hw_fib = fib->hw_fib_va;
 memset(fib, 0, sizeof(struct fib));
 fib->type = FSAFS_NTC_FIB_CONTEXT;
-fib->size = sizeof( struct fib );
+fib->size = sizeof(struct fib);
 fib->hw_fib_va = hw_fib;
 fib->data = hw_fib->data;
 fib->dev = dev;
@@ -1526,17 +1525,17 @@ int aac_command_thread(void *data)
 } else {
 /* The u32 here is important and intended. We are using
 32bit wrapping time to fit the adapter field */
 
 u32 time_now, time_last;
 unsigned long flagv;
 unsigned num;
 struct hw_fib ** hw_fib_pool, ** hw_fib_p;
 struct fib ** fib_pool, ** fib_p;
 
 /* Sniff events */
 if ((aifcmd->command ==
 cpu_to_le32(AifCmdEventNotify)) ||
 (aifcmd->command ==
 cpu_to_le32(AifCmdJobProgress))) {
 aac_handle_aif(dev, fib);
 }
@@ -1588,7 +1587,7 @@ int aac_command_thread(void *data)
 spin_lock_irqsave(&dev->fib_lock, flagv);
 entry = dev->fib_list.next;
 /*
 * For each Context that is on the
 * fibctxList, make a copy of the
 * fib, and then set the event to wake up the
 * thread that is waiting for it.
@@ -1613,7 +1612,7 @@ int aac_command_thread(void *data)
 */
 time_last = fibctx->jiffies;
 /*
 * Has it been > 2 minutes
 * since the last read off
 * the queue?
 */
@@ -1644,7 +1643,7 @@ int aac_command_thread(void *data)
 */
 list_add_tail(&newfib->fiblink, &fibctx->fib_list);
 fibctx->count++;
 /*
 * Set the event to wake up the
 * thread that is waiting.
 */