Diffstat (limited to 'drivers/message/i2o/iop.c')
-rw-r--r--  drivers/message/i2o/iop.c  1327
1 files changed, 1327 insertions, 0 deletions
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
new file mode 100644
index 000000000000..50c8cedf7a2d
--- /dev/null
+++ b/drivers/message/i2o/iop.c
@@ -0,0 +1,1327 @@
1/*
2 * Functions to handle I2O controllers and I2O message handling
3 *
4 * Copyright (C) 1999-2002 Red Hat Software
5 *
6 * Written by Alan Cox, Building Number Three Ltd
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * A lot of the I2O message side code from this is taken from the
14 * Red Creek RCPCI45 adapter driver by Red Creek Communications
15 *
16 * Fixes/additions:
17 * Philipp Rumpf
18 * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
19 * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
20 * Deepak Saxena <deepak@plexity.net>
21 * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
22 * Alan Cox <alan@redhat.com>:
23 * Ported to Linux 2.5.
24 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
25 * Minor fixes for 2.6.
26 */
27
28#include <linux/module.h>
29#include <linux/i2o.h>
30#include <linux/delay.h>
31
32#define OSM_VERSION "$Rev$"
33#define OSM_DESCRIPTION "I2O subsystem"
34
35/* global I2O controller list */
36LIST_HEAD(i2o_controllers);
37
38/*
39 * global I2O System Table. Contains information about all the IOPs in the
40 * system. Used to inform IOPs about each other's existence.
41 */
42static struct i2o_dma i2o_systab;
43
44static int i2o_hrt_get(struct i2o_controller *c);
45
46/* Module internal functions from other sources */
47extern struct i2o_driver i2o_exec_driver;
48extern int i2o_exec_lct_get(struct i2o_controller *);
49extern void i2o_device_remove(struct i2o_device *);
50
51extern int __init i2o_driver_init(void);
52extern void __exit i2o_driver_exit(void);
53extern int __init i2o_exec_init(void);
54extern void __exit i2o_exec_exit(void);
55extern int __init i2o_pci_init(void);
56extern void __exit i2o_pci_exit(void);
57extern int i2o_device_init(void);
58extern void i2o_device_exit(void);
59
60/**
61 * i2o_msg_nop - Return an unused message frame back to the IOP
62 * @c: I2O controller from which the message was created
63 * @m: message which should be returned
64 *
65 * If you fetch a message via i2o_msg_get, and can't use it, you must
66 * return the message with this function. Otherwise the message frame
67 * is lost.
68 */
69void i2o_msg_nop(struct i2o_controller *c, u32 m)
70{
71 struct i2o_message __iomem *msg = c->in_queue.virt + m;
72
73 writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
74 writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
75 &msg->u.head[1]);
76 writel(0, &msg->u.head[2]);
77 writel(0, &msg->u.head[3]);
78 i2o_msg_post(c, m);
79};
80
81/**
82 * i2o_msg_get_wait - obtain an I2O message from the IOP
83 * @c: I2O controller
84 * @msg: pointer to a I2O message pointer
85 * @wait: how long to wait until timeout
86 *
87 * This function waits up to wait seconds for a message slot to be
88 * available.
89 *
90 * On success the message is returned and the pointer to the message is
91 * set in msg. The returned message is the physical page frame offset
92 * address from the read port (see the I2O spec). If no message is
93 * available, I2O_QUEUE_EMPTY is returned and msg is left untouched.
94 */
95u32 i2o_msg_get_wait(struct i2o_controller *c, struct i2o_message __iomem **msg,
96 int wait)
97{
98 unsigned long timeout = jiffies + wait * HZ;
99 u32 m;
100
101 while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) {
102 if (time_after(jiffies, timeout)) {
103 pr_debug("%s: Timeout waiting for message frame.\n",
104 c->name);
105 return I2O_QUEUE_EMPTY;
106 }
107 set_current_state(TASK_UNINTERRUPTIBLE);
108 schedule_timeout(1);
109 }
110
111 return m;
112};
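/*
 * Illustrative usage sketch (not part of the original file): callers follow
 * the same get/fill/post pattern used throughout this file, and must either
 * post the frame or hand it back with i2o_msg_nop(), otherwise the frame is
 * lost.
 *
 *	struct i2o_message __iomem *msg;
 *	u32 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
 *
 *	if (m == I2O_QUEUE_EMPTY)
 *		return -ETIMEDOUT;
 *
 *	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
 *	writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
 *	       &msg->u.head[1]);
 *	i2o_msg_post(c, m);
 */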
113
114#if BITS_PER_LONG == 64
115/**
116 * i2o_cntxt_list_add - Append a pointer to the context list and return an id
117 * @c: controller to which the context list belongs
118 * @ptr: pointer to add to the context list
119 *
120 * Because the context field in I2O is only 32 bits wide, on 64-bit systems
121 * a pointer is too large to fit in the context field. The i2o_cntxt_list
122 * functions therefore map pointers to context fields.
123 *
124 * Returns context id > 0 on success or 0 on failure.
125 */
126u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
127{
128 struct i2o_context_list_element *entry;
129 unsigned long flags;
130
131 if (!ptr)
132 printk(KERN_ERR "%s: couldn't add NULL pointer to context list!"
133 "\n", c->name);
134
135 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
136 if (!entry) {
137 printk(KERN_ERR "%s: Could not allocate memory for context "
138 "list element\n", c->name);
139 return 0;
140 }
141
142 entry->ptr = ptr;
143 entry->timestamp = jiffies;
144 INIT_LIST_HEAD(&entry->list);
145
146 spin_lock_irqsave(&c->context_list_lock, flags);
147
148 if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
149 atomic_inc(&c->context_list_counter);
150
151 entry->context = atomic_read(&c->context_list_counter);
152
153 list_add(&entry->list, &c->context_list);
154
155 spin_unlock_irqrestore(&c->context_list_lock, flags);
156
157	pr_debug("%s: Add context to list %p -> %d\n", c->name, ptr, entry->context);
158
159 return entry->context;
160};
161
162/**
163 * i2o_cntxt_list_remove - Remove a pointer from the context list
164 * @c: controller to which the context list belongs
165 * @ptr: pointer which should be removed from the context list
166 *
167 * Removes a previously added pointer from the context list and returns
168 * the matching context id.
169 *
170 * Returns context id on success or 0 on failure.
171 */
172u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr)
173{
174 struct i2o_context_list_element *entry;
175 u32 context = 0;
176 unsigned long flags;
177
178 spin_lock_irqsave(&c->context_list_lock, flags);
179 list_for_each_entry(entry, &c->context_list, list)
180 if (entry->ptr == ptr) {
181 list_del(&entry->list);
182 context = entry->context;
183 kfree(entry);
184 break;
185 }
186 spin_unlock_irqrestore(&c->context_list_lock, flags);
187
188 if (!context)
189 printk(KERN_WARNING "%s: Could not remove nonexistent ptr "
190 "%p\n", c->name, ptr);
191
192 pr_debug("%s: remove ptr from context list %d -> %p\n", c->name,
193 context, ptr);
194
195 return context;
196};
197
198/**
199 * i2o_cntxt_list_get - Get a pointer from the context list and remove it
200 * @c: controller to which the context list belongs
201 * @context: context id to which the pointer belongs
202 *
203 * Returns pointer to the matching context id on success or NULL on
204 * failure.
205 */
206void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
207{
208 struct i2o_context_list_element *entry;
209 unsigned long flags;
210 void *ptr = NULL;
211
212 spin_lock_irqsave(&c->context_list_lock, flags);
213 list_for_each_entry(entry, &c->context_list, list)
214 if (entry->context == context) {
215 list_del(&entry->list);
216 ptr = entry->ptr;
217 kfree(entry);
218 break;
219 }
220 spin_unlock_irqrestore(&c->context_list_lock, flags);
221
222 if (!ptr)
223 printk(KERN_WARNING "%s: context id %d not found\n", c->name,
224 context);
225
226 pr_debug("%s: get ptr from context list %d -> %p\n", c->name, context,
227 ptr);
228
229 return ptr;
230};
231
232/**
233 * i2o_cntxt_list_get_ptr - Get a context id from the context list
234 * @c: controller to which the context list belongs
235 * @ptr: pointer for which the context id should be fetched
236 *
237 * Returns the context id which matches the pointer on success or 0 on
238 * failure.
239 */
240u32 i2o_cntxt_list_get_ptr(struct i2o_controller * c, void *ptr)
241{
242 struct i2o_context_list_element *entry;
243 u32 context = 0;
244 unsigned long flags;
245
246 spin_lock_irqsave(&c->context_list_lock, flags);
247 list_for_each_entry(entry, &c->context_list, list)
248 if (entry->ptr == ptr) {
249 context = entry->context;
250 break;
251 }
252 spin_unlock_irqrestore(&c->context_list_lock, flags);
253
254 if (!context)
255 printk(KERN_WARNING "%s: Could not find nonexistent ptr "
256 "%p\n", c->name, ptr);
257
258 pr_debug("%s: get context id from context list %p -> %d\n", c->name,
259 ptr, context);
260
261 return context;
262};
263#endif
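/*
 * Illustrative sketch (an assumption, not part of the original file): on a
 * 64-bit system an OSM would map a pointer through the context list before
 * placing it in the 32-bit transaction context of a message, and resolve it
 * again when the reply comes in. "struct my_req", "my_req" and "reply_tcntxt"
 * are hypothetical placeholders.
 *
 *	u32 tcntxt = i2o_cntxt_list_add(c, my_req);
 *	writel(tcntxt, &msg->u.s.tcntxt);
 *	...
 *	struct my_req *req = i2o_cntxt_list_get(c, reply_tcntxt);
 */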
264
265/**
266 * i2o_find_iop - Find an I2O controller by id
267 * @unit: unit number of the I2O controller to search for
268 *
269 * Lookup the I2O controller on the controller list.
270 *
271 * Returns pointer to the I2O controller on success or NULL if not found.
272 */
273struct i2o_controller *i2o_find_iop(int unit)
274{
275 struct i2o_controller *c;
276
277 list_for_each_entry(c, &i2o_controllers, list) {
278 if (c->unit == unit)
279 return c;
280 }
281
282 return NULL;
283};
284
285/**
286 * i2o_iop_find_device - Find an I2O device on an I2O controller
287 * @c: I2O controller on which the I2O device hangs
288 * @tid: TID of the I2O device to search for
289 *
290 * Searches the devices of the I2O controller for a device with TID tid and
291 * returns it.
292 *
293 * Returns a pointer to the I2O device if found, otherwise NULL.
294 */
295struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
296{
297 struct i2o_device *dev;
298
299 list_for_each_entry(dev, &c->devices, list)
300 if (dev->lct_data.tid == tid)
301 return dev;
302
303 return NULL;
304};
305
306/**
307 * i2o_iop_quiesce - quiesce controller
308 * @c: controller
309 *
310 * Quiesce an IOP. Causes IOP to make external operation quiescent
311 * (i2o 'READY' state). Internal operation of the IOP continues normally.
312 *
313 * Returns 0 on success or negative error code on failure.
314 */
315static int i2o_iop_quiesce(struct i2o_controller *c)
316{
317 struct i2o_message __iomem *msg;
318 u32 m;
319 i2o_status_block *sb = c->status_block.virt;
320 int rc;
321
322 i2o_status_get(c);
323
324 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
325 if ((sb->iop_state != ADAPTER_STATE_READY) &&
326 (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
327 return 0;
328
329 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
330 if (m == I2O_QUEUE_EMPTY)
331 return -ETIMEDOUT;
332
333 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
334 writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
335 &msg->u.head[1]);
336
337 /* Long timeout needed for quiesce if lots of devices */
338 if ((rc = i2o_msg_post_wait(c, m, 240)))
339 printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
340 c->name, -rc);
341 else
342 pr_debug("%s: Quiesced.\n", c->name);
343
344 i2o_status_get(c); // Entered READY state
345
346 return rc;
347};
348
349/**
350 * i2o_iop_enable - move controller from READY to OPERATIONAL
351 * @c: I2O controller
352 *
353 * Enable IOP. This allows the IOP to resume external operations and
354 * reverses the effect of a quiesce. Returns zero or an error code if
355 * an error occurs.
356 */
357static int i2o_iop_enable(struct i2o_controller *c)
358{
359 struct i2o_message __iomem *msg;
360 u32 m;
361 i2o_status_block *sb = c->status_block.virt;
362 int rc;
363
364 i2o_status_get(c);
365
366 /* Enable only allowed on READY state */
367 if (sb->iop_state != ADAPTER_STATE_READY)
368 return -EINVAL;
369
370 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
371 if (m == I2O_QUEUE_EMPTY)
372 return -ETIMEDOUT;
373
374 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
375 writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID,
376 &msg->u.head[1]);
377
378 /* How long of a timeout do we need? */
379 if ((rc = i2o_msg_post_wait(c, m, 240)))
380 printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
381 c->name, -rc);
382 else
383 pr_debug("%s: Enabled.\n", c->name);
384
385 i2o_status_get(c); // entered OPERATIONAL state
386
387 return rc;
388};
389
390/**
391 * i2o_iop_quiesce_all - Quiesce all I2O controllers on the system
392 *
393 * Quiesce all I2O controllers which are connected to the system.
394 */
395static inline void i2o_iop_quiesce_all(void)
396{
397 struct i2o_controller *c, *tmp;
398
399 list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
400 if (!c->no_quiesce)
401 i2o_iop_quiesce(c);
402 }
403};
404
405/**
406 * i2o_iop_enable_all - Enables all controllers on the system
407 *
408 * Enables all I2O controllers which are connected to the system.
409 */
410static inline void i2o_iop_enable_all(void)
411{
412 struct i2o_controller *c, *tmp;
413
414 list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
415 i2o_iop_enable(c);
416};
417
418/**
419 * i2o_iop_clear - Bring I2O controller into HOLD state
420 * @c: controller
421 *
422 * Clear an IOP to HOLD state, i.e. terminate external operations, clear all
423 * input queues and prepare for a system restart. IOP's internal operation
424 * continues normally and the outbound queue is alive. The IOP is not
425 * expected to rebuild its LCT.
426 *
427 * Returns 0 on success or negative error code on failure.
428 */
429static int i2o_iop_clear(struct i2o_controller *c)
430{
431 struct i2o_message __iomem *msg;
432 u32 m;
433 int rc;
434
435 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
436 if (m == I2O_QUEUE_EMPTY)
437 return -ETIMEDOUT;
438
439 /* Quiesce all IOPs first */
440 i2o_iop_quiesce_all();
441
442 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
443 writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID,
444 &msg->u.head[1]);
445
446 if ((rc = i2o_msg_post_wait(c, m, 30)))
447 printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
448 c->name, -rc);
449 else
450 pr_debug("%s: Cleared.\n", c->name);
451
452 /* Enable all IOPs */
453 i2o_iop_enable_all();
454
455 i2o_status_get(c);
456
457 return rc;
458}
459
460/**
461 * i2o_iop_reset - reset an I2O controller
462 * @c: controller to reset
463 *
464 * Reset the IOP into INIT state and wait until IOP gets into RESET state.
465 * Terminate all external operations, clear IOP's inbound and outbound
466 * queues, terminate all DDMs, and reload the IOP's operating environment
467 * and all local DDMs. The IOP rebuilds its LCT.
468 */
469static int i2o_iop_reset(struct i2o_controller *c)
470{
471 u8 *status = c->status.virt;
472 struct i2o_message __iomem *msg;
473 u32 m;
474 unsigned long timeout;
475 i2o_status_block *sb = c->status_block.virt;
476 int rc = 0;
477
478 pr_debug("%s: Resetting controller\n", c->name);
479
480 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
481 if (m == I2O_QUEUE_EMPTY)
482 return -ETIMEDOUT;
483
484 memset(status, 0, 8);
485
486 /* Quiesce all IOPs first */
487 i2o_iop_quiesce_all();
488
489 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
490 writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID,
491 &msg->u.head[1]);
492 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
493 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context
494 writel(0, &msg->body[0]);
495 writel(0, &msg->body[1]);
496 writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]);
497 writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]);
498
499 i2o_msg_post(c, m);
500
501 /* Wait for a reply */
502 timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
503 while (!*status) {
504 if (time_after(jiffies, timeout)) {
505 printk(KERN_ERR "%s: IOP reset timeout.\n", c->name);
506 rc = -ETIMEDOUT;
507 goto exit;
508 }
509
510 /* Promise bug */
511 if (status[1] || status[4]) {
512 *status = 0;
513 break;
514 }
515
516 set_current_state(TASK_UNINTERRUPTIBLE);
517 schedule_timeout(1);
518
519 rmb();
520 }
521
522 if (*status == I2O_CMD_IN_PROGRESS) {
523 /*
524 * Once the reset is sent, the IOP goes into the INIT state
525 * which is indeterminate. We need to wait until the IOP
526 * has rebooted before we can let the system talk to
527 * it. We read the inbound Free_List until a message is
528 * available. If we can't read one in the given amount of
529 * time, we assume the IOP could not reboot properly.
530 */
531 pr_debug("%s: Reset in progress, waiting for reboot...\n",
532 c->name);
533
534 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
535 while (m == I2O_QUEUE_EMPTY) {
536 if (time_after(jiffies, timeout)) {
537 printk(KERN_ERR "%s: IOP reset timeout.\n",
538 c->name);
539 rc = -ETIMEDOUT;
540 goto exit;
541 }
542 set_current_state(TASK_UNINTERRUPTIBLE);
543 schedule_timeout(1);
544
545 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
546 }
547 i2o_msg_nop(c, m);
548 }
549
550 /* from here all quiesce commands are safe */
551 c->no_quiesce = 0;
552
553 /* If IopReset was rejected or didn't perform reset, try IopClear */
554 i2o_status_get(c);
555 if (*status == I2O_CMD_REJECTED || sb->iop_state != ADAPTER_STATE_RESET) {
556 printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",
557 c->name);
558 i2o_iop_clear(c);
559 } else
560 pr_debug("%s: Reset completed.\n", c->name);
561
562 exit:
563 /* Enable all IOPs */
564 i2o_iop_enable_all();
565
566 return rc;
567};
568
569/**
570 * i2o_iop_init_outbound_queue - setup the outbound message queue
571 * @c: I2O controller
572 *
573 * Clear and (re)initialize IOP's outbound queue and post the message
574 * frames to the IOP.
575 *
576 * Returns 0 on success or a negative errno code on failure.
577 */
578static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
579{
580 u8 *status = c->status.virt;
581 u32 m;
582 struct i2o_message __iomem *msg;
583 ulong timeout;
584 int i;
585
586 pr_debug("%s: Initializing Outbound Queue...\n", c->name);
587
588 memset(status, 0, 4);
589
590 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
591 if (m == I2O_QUEUE_EMPTY)
592 return -ETIMEDOUT;
593
594 writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]);
595 writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
596 &msg->u.head[1]);
597 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
598 writel(0x0106, &msg->u.s.tcntxt); /* FIXME: why 0x0106, maybe in
599 Spec? */
600 writel(PAGE_SIZE, &msg->body[0]);
601 writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); /* Outbound msg frame
602 size in words and Initcode */
603 writel(0xd0000004, &msg->body[2]);
604 writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]);
605 writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]);
606
607 i2o_msg_post(c, m);
608
609 timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
610 while (*status <= I2O_CMD_IN_PROGRESS) {
611 if (time_after(jiffies, timeout)) {
612 printk(KERN_WARNING "%s: Timeout Initializing\n",
613 c->name);
614 return -ETIMEDOUT;
615 }
616 set_current_state(TASK_UNINTERRUPTIBLE);
617 schedule_timeout(1);
618
619 rmb();
620 }
621
622 m = c->out_queue.phys;
623
624 /* Post frames */
625 for (i = 0; i < NMBR_MSG_FRAMES; i++) {
626 i2o_flush_reply(c, m);
627 udelay(1); /* Promise */
628 m += MSG_FRAME_SIZE * 4;
629 }
630
631 return 0;
632}
633
634/**
635 * i2o_iop_send_nop - send a core NOP message
636 * @c: controller
637 *
638 * Send a no-operation message with a reply set to cause no
639 * action either. Needed for bringing up promise controllers.
640 */
641static int i2o_iop_send_nop(struct i2o_controller *c)
642{
643 struct i2o_message __iomem *msg;
644 u32 m = i2o_msg_get_wait(c, &msg, HZ);
645 if (m == I2O_QUEUE_EMPTY)
646 return -ETIMEDOUT;
647 i2o_msg_nop(c, m);
648 return 0;
649}
650
651/**
652 * i2o_iop_activate - Bring controller up to HOLD
653 * @c: controller
654 *
655 * This function brings an I2O controller into HOLD state. The adapter
656 * is reset if necessary and then the queues and resource table are read.
657 *
658 * Returns 0 on success or negative error code on failure.
659 */
660static int i2o_iop_activate(struct i2o_controller *c)
661{
662 struct pci_dev *i960 = NULL;
663 i2o_status_block *sb = c->status_block.virt;
664 int rc;
665
666 if (c->promise) {
667 /* Beat up the hardware first of all */
668 i960 =
669 pci_find_slot(c->pdev->bus->number,
670 PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));
671 if (i960)
672 pci_write_config_word(i960, 0x42, 0);
673
674 /* Follow this sequence precisely or the controller
675 ceases to perform useful functions until reboot */
676 if ((rc = i2o_iop_send_nop(c)))
677 return rc;
678
679 if ((rc = i2o_iop_reset(c)))
680 return rc;
681 }
682
683 /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
684 /* In READY state, Get status */
685
686 rc = i2o_status_get(c);
687 if (rc) {
688 printk(KERN_INFO "%s: Unable to obtain status, "
689 "attempting a reset.\n", c->name);
690 if (i2o_iop_reset(c))
691 return rc;
692 }
693
694 if (sb->i2o_version > I2OVER15) {
695 printk(KERN_ERR "%s: Not running version 1.5 of the I2O "
696 "Specification.\n", c->name);
697 return -ENODEV;
698 }
699
700 switch (sb->iop_state) {
701 case ADAPTER_STATE_FAULTED:
702 printk(KERN_CRIT "%s: hardware fault\n", c->name);
703 return -ENODEV;
704
705 case ADAPTER_STATE_READY:
706 case ADAPTER_STATE_OPERATIONAL:
707 case ADAPTER_STATE_HOLD:
708 case ADAPTER_STATE_FAILED:
709 pr_debug("%s: already running, trying to reset...\n", c->name);
710 if (i2o_iop_reset(c))
711 return -ENODEV;
712 }
713
714 rc = i2o_iop_init_outbound_queue(c);
715 if (rc)
716 return rc;
717
718 if (c->promise) {
719 if ((rc = i2o_iop_send_nop(c)))
720 return rc;
721
722 if ((rc = i2o_status_get(c)))
723 return rc;
724
725 if (i960)
726 pci_write_config_word(i960, 0x42, 0x3FF);
727 }
728
729 /* In HOLD state */
730
731 rc = i2o_hrt_get(c);
732
733 return rc;
734};
735
736/**
737 * i2o_iop_systab_set - Set the I2O System Table of the specified IOP
738 * @c: I2O controller to which the system table should be sent
739 *
740 * Before the systab can be set, i2o_systab_build() must be called.
741 *
742 * Returns 0 on success or negative error code on failure.
743 */
744static int i2o_iop_systab_set(struct i2o_controller *c)
745{
746 struct i2o_message __iomem *msg;
747 u32 m;
748 i2o_status_block *sb = c->status_block.virt;
749 struct device *dev = &c->pdev->dev;
750 struct resource *root;
751 int rc;
752
753 if (sb->current_mem_size < sb->desired_mem_size) {
754 struct resource *res = &c->mem_resource;
755 res->name = c->pdev->bus->name;
756 res->flags = IORESOURCE_MEM;
757 res->start = 0;
758 res->end = 0;
759 printk(KERN_INFO "%s: requires private memory resources.\n",
760 c->name);
761 root = pci_find_parent_resource(c->pdev, res);
762 if (root == NULL)
763 printk(KERN_WARNING "%s: Can't find parent resource!\n",
764 c->name);
765 if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
766 NULL, NULL) >= 0) {
767 c->mem_alloc = 1;
768 sb->current_mem_size = 1 + res->end - res->start;
769 sb->current_mem_base = res->start;
770 printk(KERN_INFO "%s: allocated %ld bytes of PCI memory"
771 " at 0x%08lX.\n", c->name,
772 1 + res->end - res->start, res->start);
773 }
774 }
775
776 if (sb->current_io_size < sb->desired_io_size) {
777 struct resource *res = &c->io_resource;
778 res->name = c->pdev->bus->name;
779 res->flags = IORESOURCE_IO;
780 res->start = 0;
781 res->end = 0;
782		printk(KERN_INFO "%s: requires private I/O resources.\n",
783 c->name);
784 root = pci_find_parent_resource(c->pdev, res);
785 if (root == NULL)
786 printk(KERN_WARNING "%s: Can't find parent resource!\n",
787 c->name);
788 if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
789 NULL, NULL) >= 0) {
790 c->io_alloc = 1;
791 sb->current_io_size = 1 + res->end - res->start;
792			sb->current_io_base = res->start;
793 printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at"
794 " 0x%08lX.\n", c->name,
795 1 + res->end - res->start, res->start);
796 }
797 }
798
799 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
800 if (m == I2O_QUEUE_EMPTY)
801 return -ETIMEDOUT;
802
803 i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
804 PCI_DMA_TODEVICE);
805 if (!i2o_systab.phys) {
806 i2o_msg_nop(c, m);
807 return -ENOMEM;
808 }
809
810 writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]);
811 writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID,
812 &msg->u.head[1]);
813
814 /*
815 * Provide three SGL-elements:
816 * System table (SysTab), Private memory space declaration and
817 * Private i/o space declaration
818 *
819 * FIXME: is this still true?
820 * Nasty one here. We can't use dma_alloc_coherent to send the
821 * same table to everyone. We have to go remap it for them all
822 */
823
824 writel(c->unit + 2, &msg->body[0]);
825 writel(0, &msg->body[1]);
826 writel(0x54000000 | i2o_systab.len, &msg->body[2]);
827 writel(i2o_systab.phys, &msg->body[3]);
828 writel(0x54000000 | sb->current_mem_size, &msg->body[4]);
829 writel(sb->current_mem_base, &msg->body[5]);
830 writel(0xd4000000 | sb->current_io_size, &msg->body[6]);
831	writel(sb->current_io_base, &msg->body[7]);
832
833 rc = i2o_msg_post_wait(c, m, 120);
834
835 dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
836 PCI_DMA_TODEVICE);
837
838 if (rc < 0)
839 printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n",
840 c->name, -rc);
841 else
842 pr_debug("%s: SysTab set.\n", c->name);
843
844 i2o_status_get(c); // Entered READY state
845
846 return rc;
847}
848
849/**
850 * i2o_iop_online - Bring a controller online into OPERATIONAL state.
851 * @c: I2O controller
852 *
853 * Send the system table and enable the I2O controller.
854 *
855 * Returns 0 on success or negative error code on failure.
856 */
857static int i2o_iop_online(struct i2o_controller *c)
858{
859 int rc;
860
861 rc = i2o_iop_systab_set(c);
862 if (rc)
863 return rc;
864
865 /* In READY state */
866 pr_debug("%s: Attempting to enable...\n", c->name);
867 rc = i2o_iop_enable(c);
868 if (rc)
869 return rc;
870
871 return 0;
872};
873
874/**
875 * i2o_iop_remove - Remove the I2O controller from the I2O core
876 * @c: I2O controller
877 *
878 * Remove the I2O controller from the I2O core. If devices are attached to
879 * the controller, remove them as well and finally reset the controller.
880 */
881void i2o_iop_remove(struct i2o_controller *c)
882{
883 struct i2o_device *dev, *tmp;
884
885 pr_debug("%s: deleting controller\n", c->name);
886
887 i2o_driver_notify_controller_remove_all(c);
888
889 list_del(&c->list);
890
891 list_for_each_entry_safe(dev, tmp, &c->devices, list)
892 i2o_device_remove(dev);
893
894 /* Ask the IOP to switch to RESET state */
895 i2o_iop_reset(c);
896}
897
898/**
899 * i2o_systab_build - Build system table
900 *
901 * The system table contains information about all the IOPs in the system
902 * (duh) and is used by the Executives on the IOPs to establish peer2peer
903 * connections. We're not supporting peer2peer at the moment, but this
904 * will be needed down the road for things like lan2lan forwarding.
905 *
906 * Returns 0 on success or negative error code on failure.
907 */
908static int i2o_systab_build(void)
909{
910 struct i2o_controller *c, *tmp;
911 int num_controllers = 0;
912 u32 change_ind = 0;
913 int count = 0;
914 struct i2o_sys_tbl *systab = i2o_systab.virt;
915
916 list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
917 num_controllers++;
918
919 if (systab) {
920 change_ind = systab->change_ind;
921 kfree(i2o_systab.virt);
922 }
923
924 /* Header + IOPs */
925 i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
926 sizeof(struct i2o_sys_tbl_entry);
927
928 systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL);
929 if (!systab) {
930 printk(KERN_ERR "i2o: unable to allocate memory for System "
931 "Table\n");
932 return -ENOMEM;
933 }
934 memset(systab, 0, i2o_systab.len);
935
936 systab->version = I2OVERSION;
937 systab->change_ind = change_ind + 1;
938
939 list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
940 i2o_status_block *sb;
941
942 if (count >= num_controllers) {
943 printk(KERN_ERR "i2o: controller added while building "
944 "system table\n");
945 break;
946 }
947
948 sb = c->status_block.virt;
949
950 /*
951 * Get updated IOP state so we have the latest information
952 *
953 * We should delete the controller at this point if it
954 * doesn't respond since if it's not on the system table
955 * it is technically not part of the I2O subsystem...
956 */
957 if (unlikely(i2o_status_get(c))) {
958 printk(KERN_ERR "%s: Deleting b/c could not get status"
959 " while attempting to build system table\n",
960 c->name);
961 i2o_iop_remove(c);
962 continue; // try the next one
963 }
964
965 systab->iops[count].org_id = sb->org_id;
966 systab->iops[count].iop_id = c->unit + 2;
967 systab->iops[count].seg_num = 0;
968 systab->iops[count].i2o_version = sb->i2o_version;
969 systab->iops[count].iop_state = sb->iop_state;
970 systab->iops[count].msg_type = sb->msg_type;
971 systab->iops[count].frame_size = sb->inbound_frame_size;
972 systab->iops[count].last_changed = change_ind;
973 systab->iops[count].iop_capabilities = sb->iop_capabilities;
974 systab->iops[count].inbound_low = i2o_ptr_low(c->post_port);
975 systab->iops[count].inbound_high = i2o_ptr_high(c->post_port);
976
977 count++;
978 }
979
980 systab->num_entries = count;
981
982 return 0;
983};
984
985/**
986 * i2o_parse_hrt - Parse the hardware resource table.
987 * @c: I2O controller
988 *
989 * We don't do anything with it except dumping it (in debug mode).
990 *
991 * Returns 0.
992 */
993static int i2o_parse_hrt(struct i2o_controller *c)
994{
995 i2o_dump_hrt(c);
996 return 0;
997};
998
999/**
1000 * i2o_status_get - Get the status block from the I2O controller
1001 * @c: I2O controller
1002 *
1003 * Issue a status query on the controller. This updates the attached
1004 * status block. The status block can then be accessed through
1005 * c->status_block.
1006 *
1007 * Returns 0 on success or negative error code on failure.
1008 */
1009int i2o_status_get(struct i2o_controller *c)
1010{
1011 struct i2o_message __iomem *msg;
1012 u32 m;
1013 u8 *status_block;
1014 unsigned long timeout;
1015
1016 status_block = (u8 *) c->status_block.virt;
1017 memset(status_block, 0, sizeof(i2o_status_block));
1018
1019 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
1020 if (m == I2O_QUEUE_EMPTY)
1021 return -ETIMEDOUT;
1022
1023 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
1024 writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
1025 &msg->u.head[1]);
1026 writel(i2o_exec_driver.context, &msg->u.s.icntxt);
1027	writel(0, &msg->u.s.tcntxt);	// FIXME: use reasonable transaction context
1028 writel(0, &msg->body[0]);
1029 writel(0, &msg->body[1]);
1030 writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]);
1031 writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]);
1032 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */
1033
1034 i2o_msg_post(c, m);
1035
1036 /* Wait for a reply */
1037 timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
1038 while (status_block[87] != 0xFF) {
1039 if (time_after(jiffies, timeout)) {
1040 printk(KERN_ERR "%s: Get status timeout.\n", c->name);
1041 return -ETIMEDOUT;
1042 }
1043
1044 set_current_state(TASK_UNINTERRUPTIBLE);
1045 schedule_timeout(1);
1046
1047 rmb();
1048 }
1049
1050#ifdef DEBUG
1051 i2o_debug_state(c);
1052#endif
1053
1054 return 0;
1055}
1056
1057/**
1058 * i2o_hrt_get - Get the Hardware Resource Table from the I2O controller
1059 * @c: I2O controller from which the HRT should be fetched
1060 *
1061 * The HRT contains information about possible hidden devices but is
1062 * mostly useless to us.
1063 *
1064 * Returns 0 on success or negative error code on failure.
1065 */
1066static int i2o_hrt_get(struct i2o_controller *c)
1067{
1068 int rc;
1069 int i;
1070 i2o_hrt *hrt = c->hrt.virt;
1071 u32 size = sizeof(i2o_hrt);
1072 struct device *dev = &c->pdev->dev;
1073
1074 for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
1075 struct i2o_message __iomem *msg;
1076 u32 m;
1077
1078 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
1079 if (m == I2O_QUEUE_EMPTY)
1080 return -ETIMEDOUT;
1081
1082 writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]);
1083 writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
1084 &msg->u.head[1]);
1085 writel(0xd0000000 | c->hrt.len, &msg->body[0]);
1086 writel(c->hrt.phys, &msg->body[1]);
1087
1088 rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt);
1089
1090 if (rc < 0) {
1091 printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
1092 c->name, -rc);
1093 return rc;
1094 }
1095
1096 size = hrt->num_entries * hrt->entry_len << 2;
1097 if (size > c->hrt.len) {
1098 if (i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL))
1099 return -ENOMEM;
1100 else
1101 hrt = c->hrt.virt;
1102 } else
1103 return i2o_parse_hrt(c);
1104 }
1105
1106 printk(KERN_ERR "%s: Unable to get HRT after %d tries, giving up\n",
1107 c->name, I2O_HRT_GET_TRIES);
1108
1109 return -EBUSY;
1110}
1111
1112/**
1113 * i2o_iop_alloc - Allocate and initialize an i2o_controller struct
1114 *
1115 * Allocate the necessary memory for an i2o_controller struct and
1116 * initialize the lists.
1117 *
1118 * Returns a pointer to the I2O controller or a negative error code on
1119 * failure.
1120 */
1121struct i2o_controller *i2o_iop_alloc(void)
1122{
1123 static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
1124 struct i2o_controller *c;
1125
1126 c = kmalloc(sizeof(*c), GFP_KERNEL);
1127 if (!c) {
1128		printk(KERN_ERR "i2o: Insufficient memory to allocate an I2O "
1129 "controller.\n");
1130 return ERR_PTR(-ENOMEM);
1131 }
1132 memset(c, 0, sizeof(*c));
1133
1134 INIT_LIST_HEAD(&c->devices);
1135 spin_lock_init(&c->lock);
1136 init_MUTEX(&c->lct_lock);
1137 c->unit = unit++;
1138 sprintf(c->name, "iop%d", c->unit);
1139
1140#if BITS_PER_LONG == 64
1141 spin_lock_init(&c->context_list_lock);
1142 atomic_set(&c->context_list_counter, 0);
1143 INIT_LIST_HEAD(&c->context_list);
1144#endif
1145
1146 return c;
1147};
1148
1149/**
1150 * i2o_iop_free - Free the i2o_controller struct
1151 * @c: I2O controller to free
1152 */
1153void i2o_iop_free(struct i2o_controller *c)
1154{
1155 kfree(c);
1156};
1157
1158/**
1159 * i2o_iop_add - Initialize the I2O controller and add it to the I2O core
1160 * @c: controller
1161 *
1162 * Initialize the I2O controller and, if no error occurs, add it to the I2O
1163 * core.
1164 *
1165 * Returns 0 on success or negative error code on failure.
1166 */
1167int i2o_iop_add(struct i2o_controller *c)
1168{
1169 int rc;
1170
1171 printk(KERN_INFO "%s: Activating I2O controller...\n", c->name);
1172 printk(KERN_INFO "%s: This may take a few minutes if there are many "
1173 "devices\n", c->name);
1174
1175 if ((rc = i2o_iop_activate(c))) {
1176 printk(KERN_ERR "%s: could not activate controller\n",
1177 c->name);
1178 i2o_iop_reset(c);
1179 return rc;
1180 }
1181
1182 pr_debug("%s: building sys table...\n", c->name);
1183
1184 if ((rc = i2o_systab_build())) {
1185 i2o_iop_reset(c);
1186 return rc;
1187 }
1188
1189 pr_debug("%s: online controller...\n", c->name);
1190
1191 if ((rc = i2o_iop_online(c))) {
1192 i2o_iop_reset(c);
1193 return rc;
1194 }
1195
1196 pr_debug("%s: getting LCT...\n", c->name);
1197
1198 if ((rc = i2o_exec_lct_get(c))) {
1199 i2o_iop_reset(c);
1200 return rc;
1201 }
1202
1203 list_add(&c->list, &i2o_controllers);
1204
1205 i2o_driver_notify_controller_add_all(c);
1206
1207 printk(KERN_INFO "%s: Controller added\n", c->name);
1208
1209 return 0;
1210};
1211
1212/**
1213 * i2o_event_register - Turn on/off event notification for an I2O device
1214 * @dev: I2O device which should receive the event registration request
1215 * @drv: driver which wants to be notified
1216 * @tcntxt: transaction context to use with this notifier
1217 * @evt_mask: mask of events
1218 *
1219 * Creates and posts an event registration message to the task. No reply
1220 * is waited for, or expected. If you do not want further notifications,
1221 * call i2o_event_register again with an evt_mask of 0.
1222 *
1223 * Returns 0 on success or -ETIMEDOUT if no message could be fetched for
1224 * sending the request.
1225 */
1226int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
1227 int tcntxt, u32 evt_mask)
1228{
1229 struct i2o_controller *c = dev->iop;
1230 struct i2o_message __iomem *msg;
1231 u32 m;
1232
1233 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
1234 if (m == I2O_QUEUE_EMPTY)
1235 return -ETIMEDOUT;
1236
1237 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
1238 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data.
1239 tid, &msg->u.head[1]);
1240 writel(drv->context, &msg->u.s.icntxt);
1241 writel(tcntxt, &msg->u.s.tcntxt);
1242 writel(evt_mask, &msg->body[0]);
1243
1244 i2o_msg_post(c, m);
1245
1246 return 0;
1247};
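/*
 * Illustrative sketch (an assumption, not from the original file): an OSM
 * that wants event notifications for a device would call
 *
 *	i2o_event_register(i2o_dev, &my_osm_driver, tcntxt, evt_mask);
 *
 * and later switch them off again by registering with an event mask of 0:
 *
 *	i2o_event_register(i2o_dev, &my_osm_driver, tcntxt, 0);
 *
 * "i2o_dev", "my_osm_driver" and "tcntxt" are hypothetical placeholders.
 */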
1248
1249/**
1250 * i2o_iop_init - I2O main initialization function
1251 *
1252 * Initialize the I2O drivers (OSM) functions, register the Executive OSM,
1253 * initialize the I2O PCI part and finally initialize I2O device stuff.
1254 *
1255 * Returns 0 on success or negative error code on failure.
1256 */
1257static int __init i2o_iop_init(void)
1258{
1259 int rc = 0;
1260
1261 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
1262
1263 rc = i2o_device_init();
1264 if (rc)
1265 goto exit;
1266
1267 rc = i2o_driver_init();
1268 if (rc)
1269 goto device_exit;
1270
1271 rc = i2o_exec_init();
1272 if (rc)
1273 goto driver_exit;
1274
1275 rc = i2o_pci_init();
1276 if (rc < 0)
1277 goto exec_exit;
1278
1279 return 0;
1280
1281 exec_exit:
1282 i2o_exec_exit();
1283
1284 driver_exit:
1285 i2o_driver_exit();
1286
1287 device_exit:
1288 i2o_device_exit();
1289
1290 exit:
1291 return rc;
1292}
1293
1294/**
1295 * i2o_iop_exit - I2O main exit function
1296 *
1297 * Removes I2O controllers from the PCI subsystem and shuts down the OSMs.
1298 */
1299static void __exit i2o_iop_exit(void)
1300{
1301 i2o_pci_exit();
1302 i2o_exec_exit();
1303 i2o_driver_exit();
1304 i2o_device_exit();
1305};
1306
1307module_init(i2o_iop_init);
1308module_exit(i2o_iop_exit);
1309
1310MODULE_AUTHOR("Red Hat Software");
1311MODULE_LICENSE("GPL");
1312MODULE_DESCRIPTION(OSM_DESCRIPTION);
1313MODULE_VERSION(OSM_VERSION);
1314
1315#if BITS_PER_LONG == 64
1316EXPORT_SYMBOL(i2o_cntxt_list_add);
1317EXPORT_SYMBOL(i2o_cntxt_list_get);
1318EXPORT_SYMBOL(i2o_cntxt_list_remove);
1319EXPORT_SYMBOL(i2o_cntxt_list_get_ptr);
1320#endif
1321EXPORT_SYMBOL(i2o_msg_get_wait);
1322EXPORT_SYMBOL(i2o_msg_nop);
1323EXPORT_SYMBOL(i2o_find_iop);
1324EXPORT_SYMBOL(i2o_iop_find_device);
1325EXPORT_SYMBOL(i2o_event_register);
1326EXPORT_SYMBOL(i2o_status_get);
1327EXPORT_SYMBOL(i2o_controllers);