author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/scsi/aacraid/commsup.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/scsi/aacraid/commsup.c')
-rw-r--r--	drivers/scsi/aacraid/commsup.c	939
1 files changed, 939 insertions, 0 deletions
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
new file mode 100644
index 00000000000..3f36dbaa2bb
--- /dev/null
+++ b/drivers/scsi/aacraid/commsup.c
@@ -0,0 +1,939 @@
/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines that are required for FSA host/adapter
 *    communication.
 *
 */
31 | |||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/sched.h> | ||
36 | #include <linux/pci.h> | ||
37 | #include <linux/spinlock.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/completion.h> | ||
40 | #include <linux/blkdev.h> | ||
41 | #include <asm/semaphore.h> | ||
42 | |||
43 | #include "aacraid.h" | ||
44 | |||
/**
 * fib_map_alloc - allocate the fib objects
 * @dev: Adapter to allocate for
 *
 * Allocate and map the shared PCI space for the FIB blocks used to
 * talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
		return -ENOMEM;
	return 0;
}

/**
 * fib_map_free - free the fib objects
 * @dev: Adapter to free
 *
 * Free the PCI mappings and the memory allocated for FIB blocks
 * on this adapter.
 */

void fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 * fib_setup - setup the fibs
 * @dev: Adapter to set up
 *
 * Allocate the PCI space for the fibs, map it and then initialise the
 * fib area, the unmapped fib data and also the free list
 */

int fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	if(fib_map_alloc(dev)<0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
	/*
	 * Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = 0xffffffff;
		hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
		hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib);
	}
	/*
	 * Add the fib chain to the free list
	 */
	dev->fibs[AAC_NUM_FIB-1].next = NULL;
	/*
	 * Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}
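
/*
 * Illustrative usage sketch: a caller in the adapter bring-up path would
 * pair fib_setup() with fib_map_free() on its error path. The names
 * aac_example_init() and aac_example_start() are hypothetical, not part
 * of this driver.
 *
 *	static int aac_example_init(struct aac_dev *dev)
 *	{
 *		int status;
 *
 *		if (fib_setup(dev) < 0)
 *			return -ENOMEM;
 *		status = aac_example_start(dev);
 *		if (status < 0)
 *			fib_map_free(dev);
 *		return status;
 *	}
 */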
121 | |||
122 | /** | ||
123 | * fib_alloc - allocate a fib | ||
124 | * @dev: Adapter to allocate the fib for | ||
125 | * | ||
126 | * Allocate a fib from the adapter fib pool. If the pool is empty we | ||
127 | * wait for fibs to become free. | ||
128 | */ | ||
129 | |||
130 | struct fib * fib_alloc(struct aac_dev *dev) | ||
131 | { | ||
132 | struct fib * fibptr; | ||
133 | unsigned long flags; | ||
134 | spin_lock_irqsave(&dev->fib_lock, flags); | ||
135 | fibptr = dev->free_fib; | ||
136 | /* Cannot sleep here or you get hangs. Instead we did the | ||
137 | maths at compile time. */ | ||
138 | if(!fibptr) | ||
139 | BUG(); | ||
140 | dev->free_fib = fibptr->next; | ||
141 | spin_unlock_irqrestore(&dev->fib_lock, flags); | ||
142 | /* | ||
143 | * Set the proper node type code and node byte size | ||
144 | */ | ||
145 | fibptr->type = FSAFS_NTC_FIB_CONTEXT; | ||
146 | fibptr->size = sizeof(struct fib); | ||
147 | /* | ||
148 | * Null out fields that depend on being zero at the start of | ||
149 | * each I/O | ||
150 | */ | ||
151 | fibptr->hw_fib->header.XferState = 0; | ||
152 | fibptr->callback = NULL; | ||
153 | fibptr->callback_data = NULL; | ||
154 | |||
155 | return fibptr; | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * fib_free - free a fib | ||
160 | * @fibptr: fib to free up | ||
161 | * | ||
162 | * Frees up a fib and places it on the appropriate queue | ||
163 | * (either free or timed out) | ||
164 | */ | ||
165 | |||
166 | void fib_free(struct fib * fibptr) | ||
167 | { | ||
168 | unsigned long flags; | ||
169 | |||
170 | spin_lock_irqsave(&fibptr->dev->fib_lock, flags); | ||
171 | if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) { | ||
172 | aac_config.fib_timeouts++; | ||
173 | fibptr->next = fibptr->dev->timeout_fib; | ||
174 | fibptr->dev->timeout_fib = fibptr; | ||
175 | } else { | ||
176 | if (fibptr->hw_fib->header.XferState != 0) { | ||
177 | printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", | ||
178 | (void*)fibptr, | ||
179 | le32_to_cpu(fibptr->hw_fib->header.XferState)); | ||
180 | } | ||
181 | fibptr->next = fibptr->dev->free_fib; | ||
182 | fibptr->dev->free_fib = fibptr; | ||
183 | } | ||
184 | spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags); | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * fib_init - initialise a fib | ||
189 | * @fibptr: The fib to initialize | ||
190 | * | ||
191 | * Set up the generic fib fields ready for use | ||
192 | */ | ||
193 | |||
194 | void fib_init(struct fib *fibptr) | ||
195 | { | ||
196 | struct hw_fib *hw_fib = fibptr->hw_fib; | ||
197 | |||
198 | hw_fib->header.StructType = FIB_MAGIC; | ||
199 | hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib)); | ||
200 | hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable); | ||
201 | hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa); | ||
202 | hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa); | ||
203 | hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib)); | ||
204 | } | ||
205 | |||
206 | /** | ||
207 | * fib_deallocate - deallocate a fib | ||
208 | * @fibptr: fib to deallocate | ||
209 | * | ||
210 | * Will deallocate and return to the free pool the FIB pointed to by the | ||
211 | * caller. | ||
212 | */ | ||
213 | |||
214 | void fib_dealloc(struct fib * fibptr) | ||
215 | { | ||
216 | struct hw_fib *hw_fib = fibptr->hw_fib; | ||
217 | if(hw_fib->header.StructType != FIB_MAGIC) | ||
218 | BUG(); | ||
219 | hw_fib->header.XferState = 0; | ||
220 | } | ||
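
/*
 * Illustrative usage sketch: the usual lifecycle of a host-originated FIB
 * built from the routines above. The command number, payload layout and
 * the helper name aac_example_request() are hypothetical, not part of
 * this driver.
 *
 *	static int aac_example_request(struct aac_dev *dev, u16 command,
 *				       void *data, unsigned long size)
 *	{
 *		struct fib *fibptr = fib_alloc(dev);
 *		int status;
 *
 *		fib_init(fibptr);
 *		memcpy(fibptr->data, data, size);
 *		status = fib_send(command, fibptr, size, FsaNormal,
 *				  1, 1, NULL, NULL);
 *		fib_complete(fibptr);
 *		fib_free(fibptr);
 *		return status;
 *	}
 */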
221 | |||
222 | /* | ||
223 | * Commuication primitives define and support the queuing method we use to | ||
224 | * support host to adapter commuication. All queue accesses happen through | ||
225 | * these routines and are the only routines which have a knowledge of the | ||
226 | * how these queues are implemented. | ||
227 | */ | ||
228 | |||
229 | /** | ||
230 | * aac_get_entry - get a queue entry | ||
231 | * @dev: Adapter | ||
232 | * @qid: Queue Number | ||
233 | * @entry: Entry return | ||
234 | * @index: Index return | ||
235 | * @nonotify: notification control | ||
236 | * | ||
237 | * With a priority the routine returns a queue entry if the queue has free entries. If the queue | ||
238 | * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is | ||
239 | * returned. | ||
240 | */ | ||
241 | |||
242 | static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify) | ||
243 | { | ||
244 | struct aac_queue * q; | ||
245 | |||
246 | /* | ||
247 | * All of the queues wrap when they reach the end, so we check | ||
248 | * to see if they have reached the end and if they have we just | ||
249 | * set the index back to zero. This is a wrap. You could or off | ||
250 | * the high bits in all updates but this is a bit faster I think. | ||
251 | */ | ||
252 | |||
253 | q = &dev->queues->queue[qid]; | ||
254 | |||
255 | *index = le32_to_cpu(*(q->headers.producer)); | ||
256 | if ((*index - 2) == le32_to_cpu(*(q->headers.consumer))) | ||
257 | *nonotify = 1; | ||
258 | |||
259 | if (qid == AdapHighCmdQueue) { | ||
260 | if (*index >= ADAP_HIGH_CMD_ENTRIES) | ||
261 | *index = 0; | ||
262 | } else if (qid == AdapNormCmdQueue) { | ||
263 | if (*index >= ADAP_NORM_CMD_ENTRIES) | ||
264 | *index = 0; /* Wrap to front of the Producer Queue. */ | ||
265 | } | ||
266 | else if (qid == AdapHighRespQueue) | ||
267 | { | ||
268 | if (*index >= ADAP_HIGH_RESP_ENTRIES) | ||
269 | *index = 0; | ||
270 | } | ||
271 | else if (qid == AdapNormRespQueue) | ||
272 | { | ||
273 | if (*index >= ADAP_NORM_RESP_ENTRIES) | ||
274 | *index = 0; /* Wrap to front of the Producer Queue. */ | ||
275 | } | ||
276 | else { | ||
277 | printk("aacraid: invalid qid\n"); | ||
278 | BUG(); | ||
279 | } | ||
280 | |||
281 | if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */ | ||
282 | printk(KERN_WARNING "Queue %d full, %d outstanding.\n", | ||
283 | qid, q->numpending); | ||
284 | return 0; | ||
285 | } else { | ||
286 | *entry = q->base + *index; | ||
287 | return 1; | ||
288 | } | ||
289 | } | ||
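
/*
 * Worked example (assuming a queue with 8 entries): if the wrapped
 * producer index is 5 and the consumer index is 6, then (*index + 1)
 * equals the consumer index, so the queue is reported full and 0 is
 * returned. When the producer index reaches 8 it wraps back to 0 before
 * the fullness test, which is why one slot is always left unused to
 * distinguish a full queue from an empty one (producer == consumer).
 */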
290 | |||
291 | /** | ||
292 | * aac_queue_get - get the next free QE | ||
293 | * @dev: Adapter | ||
294 | * @index: Returned index | ||
295 | * @priority: Priority of fib | ||
296 | * @fib: Fib to associate with the queue entry | ||
297 | * @wait: Wait if queue full | ||
298 | * @fibptr: Driver fib object to go with fib | ||
299 | * @nonotify: Don't notify the adapter | ||
300 | * | ||
301 | * Gets the next free QE off the requested priorty adapter command | ||
302 | * queue and associates the Fib with the QE. The QE represented by | ||
303 | * index is ready to insert on the queue when this routine returns | ||
304 | * success. | ||
305 | */ | ||
306 | |||
307 | static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify) | ||
308 | { | ||
309 | struct aac_entry * entry = NULL; | ||
310 | int map = 0; | ||
311 | struct aac_queue * q = &dev->queues->queue[qid]; | ||
312 | |||
313 | spin_lock_irqsave(q->lock, q->SavedIrql); | ||
314 | |||
315 | if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) | ||
316 | { | ||
317 | /* if no entries wait for some if caller wants to */ | ||
318 | while (!aac_get_entry(dev, qid, &entry, index, nonotify)) | ||
319 | { | ||
320 | printk(KERN_ERR "GetEntries failed\n"); | ||
321 | } | ||
322 | /* | ||
323 | * Setup queue entry with a command, status and fib mapped | ||
324 | */ | ||
325 | entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); | ||
326 | map = 1; | ||
327 | } | ||
328 | else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue) | ||
329 | { | ||
330 | while(!aac_get_entry(dev, qid, &entry, index, nonotify)) | ||
331 | { | ||
332 | /* if no entries wait for some if caller wants to */ | ||
333 | } | ||
334 | /* | ||
335 | * Setup queue entry with command, status and fib mapped | ||
336 | */ | ||
337 | entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); | ||
338 | entry->addr = hw_fib->header.SenderFibAddress; | ||
339 | /* Restore adapters pointer to the FIB */ | ||
340 | hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */ | ||
341 | map = 0; | ||
342 | } | ||
343 | /* | ||
344 | * If MapFib is true than we need to map the Fib and put pointers | ||
345 | * in the queue entry. | ||
346 | */ | ||
347 | if (map) | ||
348 | entry->addr = cpu_to_le32(fibptr->hw_fib_pa); | ||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | |||
353 | /** | ||
354 | * aac_insert_entry - insert a queue entry | ||
355 | * @dev: Adapter | ||
356 | * @index: Index of entry to insert | ||
357 | * @qid: Queue number | ||
358 | * @nonotify: Suppress adapter notification | ||
359 | * | ||
360 | * Gets the next free QE off the requested priorty adapter command | ||
361 | * queue and associates the Fib with the QE. The QE represented by | ||
362 | * index is ready to insert on the queue when this routine returns | ||
363 | * success. | ||
364 | */ | ||
365 | |||
366 | static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify) | ||
367 | { | ||
368 | struct aac_queue * q = &dev->queues->queue[qid]; | ||
369 | |||
370 | if(q == NULL) | ||
371 | BUG(); | ||
372 | *(q->headers.producer) = cpu_to_le32(index + 1); | ||
373 | spin_unlock_irqrestore(q->lock, q->SavedIrql); | ||
374 | |||
375 | if (qid == AdapHighCmdQueue || | ||
376 | qid == AdapNormCmdQueue || | ||
377 | qid == AdapHighRespQueue || | ||
378 | qid == AdapNormRespQueue) | ||
379 | { | ||
380 | if (!nonotify) | ||
381 | aac_adapter_notify(dev, qid); | ||
382 | } | ||
383 | else | ||
384 | printk("Suprise insert!\n"); | ||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | /* | ||
389 | * Define the highest level of host to adapter communication routines. | ||
390 | * These routines will support host to adapter FS commuication. These | ||
391 | * routines have no knowledge of the commuication method used. This level | ||
392 | * sends and receives FIBs. This level has no knowledge of how these FIBs | ||
393 | * get passed back and forth. | ||
394 | */ | ||
395 | |||
396 | /** | ||
397 | * fib_send - send a fib to the adapter | ||
398 | * @command: Command to send | ||
399 | * @fibptr: The fib | ||
400 | * @size: Size of fib data area | ||
401 | * @priority: Priority of Fib | ||
402 | * @wait: Async/sync select | ||
403 | * @reply: True if a reply is wanted | ||
404 | * @callback: Called with reply | ||
405 | * @callback_data: Passed to callback | ||
406 | * | ||
407 | * Sends the requested FIB to the adapter and optionally will wait for a | ||
408 | * response FIB. If the caller does not wish to wait for a response than | ||
409 | * an event to wait on must be supplied. This event will be set when a | ||
410 | * response FIB is received from the adapter. | ||
411 | */ | ||
412 | |||
413 | int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) | ||
414 | { | ||
415 | u32 index; | ||
416 | u32 qid; | ||
417 | struct aac_dev * dev = fibptr->dev; | ||
418 | unsigned long nointr = 0; | ||
419 | struct hw_fib * hw_fib = fibptr->hw_fib; | ||
420 | struct aac_queue * q; | ||
421 | unsigned long flags = 0; | ||
422 | if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) | ||
423 | return -EBUSY; | ||
424 | /* | ||
425 | * There are 5 cases with the wait and reponse requested flags. | ||
426 | * The only invalid cases are if the caller requests to wait and | ||
427 | * does not request a response and if the caller does not want a | ||
428 | * response and the Fib is not allocated from pool. If a response | ||
429 | * is not requesed the Fib will just be deallocaed by the DPC | ||
430 | * routine when the response comes back from the adapter. No | ||
431 | * further processing will be done besides deleting the Fib. We | ||
432 | * will have a debug mode where the adapter can notify the host | ||
433 | * it had a problem and the host can log that fact. | ||
434 | */ | ||
435 | if (wait && !reply) { | ||
436 | return -EINVAL; | ||
437 | } else if (!wait && reply) { | ||
438 | hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected); | ||
439 | FIB_COUNTER_INCREMENT(aac_config.AsyncSent); | ||
440 | } else if (!wait && !reply) { | ||
441 | hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected); | ||
442 | FIB_COUNTER_INCREMENT(aac_config.NoResponseSent); | ||
443 | } else if (wait && reply) { | ||
444 | hw_fib->header.XferState |= cpu_to_le32(ResponseExpected); | ||
445 | FIB_COUNTER_INCREMENT(aac_config.NormalSent); | ||
446 | } | ||
	/*
	 * Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 * Set FIB state to indicate where it came from and if we want a
	 * response from the adapter. Also load the command from the
	 * caller.
	 *
	 * Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 * Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 * Get a queue entry, connect the FIB to it and then notify
	 * the adapter that a command is ready.
	 */
	if (priority == FsaHigh) {
		hw_fib->header.XferState |= cpu_to_le32(HighPriority);
		qid = AdapHighCmdQueue;
	} else {
		hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
		qid = AdapNormCmdQueue;
	}
	q = &dev->queues->queue[qid];

	if(wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
		return -EWOULDBLOCK;
	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", hw_fib->header.Command));
	dprintk((KERN_DEBUG "  XferState = %x.\n", hw_fib->header.XferState));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
	/*
	 * Fill in the Callback and CallbackContext if we are not
	 * going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}
	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
	list_add_tail(&fibptr->queue, &q->pendingq);
	q->numpending++;

	fibptr->done = 0;
	fibptr->flags = 0;

	if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
		return -EWOULDBLOCK;
	/*
	 * If the caller wanted us to wait for a response, wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		down(&fibptr->event_wait);
		if(fibptr->done == 0)
			BUG();

		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 * If the user does not want a response then return success,
	 * otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
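
/*
 * Illustrative usage sketch: an asynchronous caller passes wait = 0 and a
 * callback, and treats -EINPROGRESS as the expected "request pending"
 * result; the DPC later invokes the callback with the response FIB. The
 * names aac_example_done() and aac_example_send_async() are hypothetical,
 * not part of this driver.
 *
 *	static void aac_example_done(void *context, struct fib *fibptr)
 *	{
 *		fib_complete(fibptr);
 *		fib_free(fibptr);
 *	}
 *
 *	static int aac_example_send_async(struct aac_dev *dev, u16 command,
 *					  struct fib *fibptr,
 *					  unsigned long size)
 *	{
 *		int status = fib_send(command, fibptr, size, FsaNormal,
 *				      0, 1, aac_example_done, dev);
 *		return (status == -EINPROGRESS) ? 0 : status;
 *	}
 */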
536 | |||
537 | /** | ||
538 | * aac_consumer_get - get the top of the queue | ||
539 | * @dev: Adapter | ||
540 | * @q: Queue | ||
541 | * @entry: Return entry | ||
542 | * | ||
543 | * Will return a pointer to the entry on the top of the queue requested that | ||
544 | * we are a consumer of, and return the address of the queue entry. It does | ||
545 | * not change the state of the queue. | ||
546 | */ | ||
547 | |||
548 | int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry) | ||
549 | { | ||
550 | u32 index; | ||
551 | int status; | ||
552 | if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) { | ||
553 | status = 0; | ||
554 | } else { | ||
555 | /* | ||
556 | * The consumer index must be wrapped if we have reached | ||
557 | * the end of the queue, else we just use the entry | ||
558 | * pointed to by the header index | ||
559 | */ | ||
560 | if (le32_to_cpu(*q->headers.consumer) >= q->entries) | ||
561 | index = 0; | ||
562 | else | ||
563 | index = le32_to_cpu(*q->headers.consumer); | ||
564 | *entry = q->base + index; | ||
565 | status = 1; | ||
566 | } | ||
567 | return(status); | ||
568 | } | ||
569 | |||
570 | /** | ||
571 | * aac_consumer_free - free consumer entry | ||
572 | * @dev: Adapter | ||
573 | * @q: Queue | ||
574 | * @qid: Queue ident | ||
575 | * | ||
576 | * Frees up the current top of the queue we are a consumer of. If the | ||
577 | * queue was full notify the producer that the queue is no longer full. | ||
578 | */ | ||
579 | |||
580 | void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) | ||
581 | { | ||
582 | int wasfull = 0; | ||
583 | u32 notify; | ||
584 | |||
585 | if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer)) | ||
586 | wasfull = 1; | ||
587 | |||
588 | if (le32_to_cpu(*q->headers.consumer) >= q->entries) | ||
589 | *q->headers.consumer = cpu_to_le32(1); | ||
590 | else | ||
591 | *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1); | ||
592 | |||
593 | if (wasfull) { | ||
594 | switch (qid) { | ||
595 | |||
596 | case HostNormCmdQueue: | ||
597 | notify = HostNormCmdNotFull; | ||
598 | break; | ||
599 | case HostHighCmdQueue: | ||
600 | notify = HostHighCmdNotFull; | ||
601 | break; | ||
602 | case HostNormRespQueue: | ||
603 | notify = HostNormRespNotFull; | ||
604 | break; | ||
605 | case HostHighRespQueue: | ||
606 | notify = HostHighRespNotFull; | ||
607 | break; | ||
608 | default: | ||
609 | BUG(); | ||
610 | return; | ||
611 | } | ||
612 | aac_adapter_notify(dev, notify); | ||
613 | } | ||
614 | } | ||
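
/*
 * Illustrative usage sketch: a response handler drains a host queue by
 * pairing aac_consumer_get() with aac_consumer_free(); the processing
 * helper aac_example_handle() is a hypothetical name, not part of this
 * driver.
 *
 *	while (aac_consumer_get(dev, q, &entry)) {
 *		aac_example_handle(dev, entry);
 *		aac_consumer_free(dev, q, HostNormRespQueue);
 *	}
 */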
615 | |||
616 | /** | ||
617 | * fib_adapter_complete - complete adapter issued fib | ||
618 | * @fibptr: fib to complete | ||
619 | * @size: size of fib | ||
620 | * | ||
621 | * Will do all necessary work to complete a FIB that was sent from | ||
622 | * the adapter. | ||
623 | */ | ||
624 | |||
625 | int fib_adapter_complete(struct fib * fibptr, unsigned short size) | ||
626 | { | ||
627 | struct hw_fib * hw_fib = fibptr->hw_fib; | ||
628 | struct aac_dev * dev = fibptr->dev; | ||
629 | unsigned long nointr = 0; | ||
630 | if (hw_fib->header.XferState == 0) | ||
631 | return 0; | ||
632 | /* | ||
633 | * If we plan to do anything check the structure type first. | ||
634 | */ | ||
635 | if ( hw_fib->header.StructType != FIB_MAGIC ) { | ||
636 | return -EINVAL; | ||
637 | } | ||
638 | /* | ||
639 | * This block handles the case where the adapter had sent us a | ||
640 | * command and we have finished processing the command. We | ||
641 | * call completeFib when we are done processing the command | ||
642 | * and want to send a response back to the adapter. This will | ||
643 | * send the completed cdb to the adapter. | ||
644 | */ | ||
645 | if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { | ||
646 | hw_fib->header.XferState |= cpu_to_le32(HostProcessed); | ||
647 | if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) { | ||
648 | u32 index; | ||
649 | if (size) | ||
650 | { | ||
651 | size += sizeof(struct aac_fibhdr); | ||
652 | if (size > le16_to_cpu(hw_fib->header.SenderSize)) | ||
653 | return -EMSGSIZE; | ||
654 | hw_fib->header.Size = cpu_to_le16(size); | ||
655 | } | ||
656 | if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) { | ||
657 | return -EWOULDBLOCK; | ||
658 | } | ||
659 | if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) { | ||
660 | } | ||
661 | } | ||
662 | else if (hw_fib->header.XferState & NormalPriority) | ||
663 | { | ||
664 | u32 index; | ||
665 | |||
666 | if (size) { | ||
667 | size += sizeof(struct aac_fibhdr); | ||
668 | if (size > le16_to_cpu(hw_fib->header.SenderSize)) | ||
669 | return -EMSGSIZE; | ||
670 | hw_fib->header.Size = cpu_to_le16(size); | ||
671 | } | ||
672 | if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0) | ||
673 | return -EWOULDBLOCK; | ||
674 | if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) | ||
675 | { | ||
676 | } | ||
677 | } | ||
678 | } | ||
679 | else | ||
680 | { | ||
681 | printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n"); | ||
682 | BUG(); | ||
683 | } | ||
684 | return 0; | ||
685 | } | ||
686 | |||
687 | /** | ||
688 | * fib_complete - fib completion handler | ||
689 | * @fib: FIB to complete | ||
690 | * | ||
691 | * Will do all necessary work to complete a FIB. | ||
692 | */ | ||
693 | |||
694 | int fib_complete(struct fib * fibptr) | ||
695 | { | ||
696 | struct hw_fib * hw_fib = fibptr->hw_fib; | ||
697 | |||
698 | /* | ||
699 | * Check for a fib which has already been completed | ||
700 | */ | ||
701 | |||
702 | if (hw_fib->header.XferState == 0) | ||
703 | return 0; | ||
704 | /* | ||
705 | * If we plan to do anything check the structure type first. | ||
706 | */ | ||
707 | |||
708 | if (hw_fib->header.StructType != FIB_MAGIC) | ||
709 | return -EINVAL; | ||
710 | /* | ||
711 | * This block completes a cdb which orginated on the host and we | ||
712 | * just need to deallocate the cdb or reinit it. At this point the | ||
713 | * command is complete that we had sent to the adapter and this | ||
714 | * cdb could be reused. | ||
715 | */ | ||
716 | if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && | ||
717 | (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) | ||
718 | { | ||
719 | fib_dealloc(fibptr); | ||
720 | } | ||
721 | else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost)) | ||
722 | { | ||
723 | /* | ||
724 | * This handles the case when the host has aborted the I/O | ||
725 | * to the adapter because the adapter is not responding | ||
726 | */ | ||
727 | fib_dealloc(fibptr); | ||
728 | } else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) { | ||
729 | fib_dealloc(fibptr); | ||
730 | } else { | ||
731 | BUG(); | ||
732 | } | ||
733 | return 0; | ||
734 | } | ||
735 | |||
736 | /** | ||
737 | * aac_printf - handle printf from firmware | ||
738 | * @dev: Adapter | ||
739 | * @val: Message info | ||
740 | * | ||
741 | * Print a message passed to us by the controller firmware on the | ||
742 | * Adaptec board | ||
743 | */ | ||
744 | |||
745 | void aac_printf(struct aac_dev *dev, u32 val) | ||
746 | { | ||
747 | int length = val & 0xffff; | ||
748 | int level = (val >> 16) & 0xffff; | ||
749 | char *cp = dev->printfbuf; | ||
750 | |||
751 | /* | ||
752 | * The size of the printfbuf is set in port.c | ||
753 | * There is no variable or define for it | ||
754 | */ | ||
755 | if (length > 255) | ||
756 | length = 255; | ||
757 | if (cp[length] != 0) | ||
758 | cp[length] = 0; | ||
759 | if (level == LOG_AAC_HIGH_ERROR) | ||
760 | printk(KERN_WARNING "aacraid:%s", cp); | ||
761 | else | ||
762 | printk(KERN_INFO "aacraid:%s", cp); | ||
763 | memset(cp, 0, 256); | ||
764 | } | ||
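
/*
 * Worked example: val packs the severity in its upper 16 bits and the
 * string length in its lower 16 bits. For val = 0x00040020, level is
 * 0x0004 and length is 0x0020 (32 bytes), so the first 32 bytes of
 * dev->printfbuf are terminated and printed. Whether a given level value
 * maps to LOG_AAC_HIGH_ERROR depends on definitions outside this file.
 */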
765 | |||
766 | /** | ||
767 | * aac_command_thread - command processing thread | ||
768 | * @dev: Adapter to monitor | ||
769 | * | ||
770 | * Waits on the commandready event in it's queue. When the event gets set | ||
771 | * it will pull FIBs off it's queue. It will continue to pull FIBs off | ||
772 | * until the queue is empty. When the queue is empty it will wait for | ||
773 | * more FIBs. | ||
774 | */ | ||
775 | |||
776 | int aac_command_thread(struct aac_dev * dev) | ||
777 | { | ||
778 | struct hw_fib *hw_fib, *hw_newfib; | ||
779 | struct fib *fib, *newfib; | ||
780 | struct aac_queue_block *queues = dev->queues; | ||
781 | struct aac_fib_context *fibctx; | ||
782 | unsigned long flags; | ||
783 | DECLARE_WAITQUEUE(wait, current); | ||
784 | |||
785 | /* | ||
786 | * We can only have one thread per adapter for AIF's. | ||
787 | */ | ||
788 | if (dev->aif_thread) | ||
789 | return -EINVAL; | ||
790 | /* | ||
791 | * Set up the name that will appear in 'ps' | ||
792 | * stored in task_struct.comm[16]. | ||
793 | */ | ||
794 | daemonize("aacraid"); | ||
795 | allow_signal(SIGKILL); | ||
796 | /* | ||
797 | * Let the DPC know it has a place to send the AIF's to. | ||
798 | */ | ||
799 | dev->aif_thread = 1; | ||
800 | add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); | ||
801 | set_current_state(TASK_INTERRUPTIBLE); | ||
802 | while(1) | ||
803 | { | ||
804 | spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); | ||
805 | while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) { | ||
806 | struct list_head *entry; | ||
807 | struct aac_aifcmd * aifcmd; | ||
808 | |||
809 | set_current_state(TASK_RUNNING); | ||
810 | |||
811 | entry = queues->queue[HostNormCmdQueue].cmdq.next; | ||
812 | list_del(entry); | ||
813 | |||
814 | spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); | ||
815 | fib = list_entry(entry, struct fib, fiblink); | ||
816 | /* | ||
817 | * We will process the FIB here or pass it to a | ||
818 | * worker thread that is TBD. We Really can't | ||
819 | * do anything at this point since we don't have | ||
820 | * anything defined for this thread to do. | ||
821 | */ | ||
822 | hw_fib = fib->hw_fib; | ||
823 | memset(fib, 0, sizeof(struct fib)); | ||
824 | fib->type = FSAFS_NTC_FIB_CONTEXT; | ||
825 | fib->size = sizeof( struct fib ); | ||
826 | fib->hw_fib = hw_fib; | ||
827 | fib->data = hw_fib->data; | ||
828 | fib->dev = dev; | ||
829 | /* | ||
830 | * We only handle AifRequest fibs from the adapter. | ||
831 | */ | ||
832 | aifcmd = (struct aac_aifcmd *) hw_fib->data; | ||
833 | if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { | ||
834 | /* Handle Driver Notify Events */ | ||
835 | *(u32 *)hw_fib->data = cpu_to_le32(ST_OK); | ||
836 | fib_adapter_complete(fib, sizeof(u32)); | ||
837 | } else { | ||
838 | struct list_head *entry; | ||
839 | /* The u32 here is important and intended. We are using | ||
840 | 32bit wrapping time to fit the adapter field */ | ||
841 | |||
842 | u32 time_now, time_last; | ||
843 | unsigned long flagv; | ||
844 | |||
845 | time_now = jiffies/HZ; | ||
846 | |||
847 | spin_lock_irqsave(&dev->fib_lock, flagv); | ||
848 | entry = dev->fib_list.next; | ||
849 | /* | ||
850 | * For each Context that is on the | ||
851 | * fibctxList, make a copy of the | ||
852 | * fib, and then set the event to wake up the | ||
853 | * thread that is waiting for it. | ||
854 | */ | ||
855 | while (entry != &dev->fib_list) { | ||
856 | /* | ||
857 | * Extract the fibctx | ||
858 | */ | ||
859 | fibctx = list_entry(entry, struct aac_fib_context, next); | ||
860 | /* | ||
861 | * Check if the queue is getting | ||
862 | * backlogged | ||
863 | */ | ||
864 | if (fibctx->count > 20) | ||
865 | { | ||
866 | /* | ||
867 | * It's *not* jiffies folks, | ||
868 | * but jiffies / HZ so do not | ||
869 | * panic ... | ||
870 | */ | ||
871 | time_last = fibctx->jiffies; | ||
872 | /* | ||
873 | * Has it been > 2 minutes | ||
874 | * since the last read off | ||
875 | * the queue? | ||
876 | */ | ||
877 | if ((time_now - time_last) > 120) { | ||
878 | entry = entry->next; | ||
879 | aac_close_fib_context(dev, fibctx); | ||
880 | continue; | ||
881 | } | ||
882 | } | ||
883 | /* | ||
884 | * Warning: no sleep allowed while | ||
885 | * holding spinlock | ||
886 | */ | ||
887 | hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); | ||
888 | newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC); | ||
889 | if (newfib && hw_newfib) { | ||
890 | /* | ||
891 | * Make the copy of the FIB | ||
892 | */ | ||
893 | memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib)); | ||
894 | memcpy(newfib, fib, sizeof(struct fib)); | ||
895 | newfib->hw_fib = hw_newfib; | ||
896 | /* | ||
897 | * Put the FIB onto the | ||
898 | * fibctx's fibs | ||
899 | */ | ||
900 | list_add_tail(&newfib->fiblink, &fibctx->fib_list); | ||
901 | fibctx->count++; | ||
902 | /* | ||
903 | * Set the event to wake up the | ||
904 | * thread that will waiting. | ||
905 | */ | ||
906 | up(&fibctx->wait_sem); | ||
907 | } else { | ||
908 | printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); | ||
909 | if(newfib) | ||
910 | kfree(newfib); | ||
911 | if(hw_newfib) | ||
912 | kfree(hw_newfib); | ||
913 | } | ||
914 | entry = entry->next; | ||
915 | } | ||
916 | /* | ||
917 | * Set the status of this FIB | ||
918 | */ | ||
919 | *(u32 *)hw_fib->data = cpu_to_le32(ST_OK); | ||
920 | fib_adapter_complete(fib, sizeof(u32)); | ||
921 | spin_unlock_irqrestore(&dev->fib_lock, flagv); | ||
922 | } | ||
923 | spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); | ||
924 | kfree(fib); | ||
925 | } | ||
926 | /* | ||
927 | * There are no more AIF's | ||
928 | */ | ||
929 | spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); | ||
930 | schedule(); | ||
931 | |||
932 | if(signal_pending(current)) | ||
933 | break; | ||
934 | set_current_state(TASK_INTERRUPTIBLE); | ||
935 | } | ||
936 | remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); | ||
937 | dev->aif_thread = 0; | ||
938 | complete_and_exit(&dev->aif_completion, 0); | ||
939 | } | ||
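
/*
 * Illustrative sketch: driver initialisation elsewhere would start this
 * thread roughly as below using kernel_thread(); the field name
 * thread_pid and the exact call site are assumptions, not taken from
 * this file.
 *
 *	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread,
 *					dev, 0);
 *	if (dev->thread_pid < 0)
 *		printk(KERN_ERR "aacraid: unable to create command thread.\n");
 */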