Diffstat (limited to 'include/linux/i2o.h')

 include/linux/i2o.h | 497 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 310 insertions(+), 187 deletions(-)
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index ea9a3ad4b67f..bdc286ec947c 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -119,12 +119,21 @@ struct i2o_driver {
 };
 
 /*
- * Contains all information which are necessary for DMA operations
+ * Contains DMA mapped address information
  */
 struct i2o_dma {
	void *virt;
	dma_addr_t phys;
-	u32 len;
+	size_t len;
+};
+
+/*
+ * Contains IO mapped address information
+ */
+struct i2o_io {
+	void __iomem *virt;
+	unsigned long phys;
+	unsigned long len;
 };
 
 /*
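The split between the two structs mirrors the two kinds of mappings the driver keeps: i2o_dma describes host memory handed to the controller (coherent DMA, hence a dma_addr_t), while i2o_io describes controller registers mapped into the host (ioremap'd, hence __iomem). A minimal sketch of how each might be populated, assuming a PCI device 'pdev'; the variable names are illustrative, not part of this patch:

	struct i2o_dma status;	/* host RAM visible to the IOP */
	struct i2o_io base;	/* IOP registers visible to the host */

	/* coherent DMA allocation fills virt + phys (bus address) */
	status.len = 4096;
	status.virt = dma_alloc_coherent(&pdev->dev, status.len,
					 &status.phys, GFP_KERNEL);

	/* ioremap of a PCI BAR fills virt (__iomem) + phys (resource) */
	base.phys = pci_resource_start(pdev, 0);
	base.len = pci_resource_len(pdev, 0);
	base.virt = ioremap(base.phys, base.len);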
@@ -147,28 +156,25 @@ struct i2o_controller {
 
	struct pci_dev *pdev;		/* PCI device */
 
-	unsigned int short_req:1;	/* use small block sizes */
+	unsigned int promise:1;		/* Promise controller */
+	unsigned int adaptec:1;		/* DPT / Adaptec controller */
+	unsigned int raptor:1;		/* split bar */
	unsigned int no_quiesce:1;	/* dont quiesce before reset */
-	unsigned int raptor:1;		/* split bar */
-	unsigned int promise:1;		/* Promise controller */
-
-#ifdef CONFIG_MTRR
-	int mtrr_reg0;
-	int mtrr_reg1;
-#endif
+	unsigned int short_req:1;	/* use small block sizes */
+	unsigned int limit_sectors:1;	/* limit number of sectors / request */
+	unsigned int pae_support:1;	/* controller has 64-bit SGL support */
 
	struct list_head devices;	/* list of I2O devices */
-
-	struct notifier_block *event_notifer;	/* Events */
-	atomic_t users;
	struct list_head list;		/* Controller list */
-	void __iomem *post_port;	/* Inbout port address */
-	void __iomem *reply_port;	/* Outbound port address */
-	void __iomem *irq_mask;		/* Interrupt register address */
+
+	void __iomem *in_port;		/* Inbound port address */
+	void __iomem *out_port;		/* Outbound port address */
+	void __iomem *irq_status;	/* Interrupt status register address */
+	void __iomem *irq_mask;		/* Interrupt mask register address */
 
	/* Dynamic LCT related data */
 
-	struct i2o_dma status;		/* status of IOP */
+	struct i2o_dma status;		/* IOP status block */
 
	struct i2o_dma hrt;		/* HW Resource Table */
	i2o_lct *lct;			/* Logical Config Table */
@@ -176,21 +182,19 @@ struct i2o_controller {
	struct semaphore lct_lock;	/* Lock for LCT updates */
	struct i2o_dma status_block;	/* IOP status block */
 
-	struct i2o_dma base;		/* controller messaging unit */
-	struct i2o_dma in_queue;	/* inbound message queue Host->IOP */
+	struct i2o_io base;		/* controller messaging unit */
+	struct i2o_io in_queue;		/* inbound message queue Host->IOP */
	struct i2o_dma out_queue;	/* outbound message queue IOP->Host */
 
	unsigned int battery:1;		/* Has a battery backup */
	unsigned int io_alloc:1;	/* An I/O resource was allocated */
	unsigned int mem_alloc:1;	/* A memory resource was allocated */
 
	struct resource io_resource;	/* I/O resource allocated to the IOP */
	struct resource mem_resource;	/* Mem resource allocated to the IOP */
 
-	struct proc_dir_entry *proc_entry;	/* /proc dir */
-
-	struct list_head bus_list;	/* list of busses on IOP */
	struct device device;
+	struct class_device classdev;	/* I2O controller class */
	struct i2o_device *exec;	/* Executive */
 #if BITS_PER_LONG == 64
	spinlock_t context_list_lock;	/* lock for context_list */
@@ -241,9 +245,10 @@ struct i2o_sys_tbl {
 extern struct list_head i2o_controllers;
 
 /* Message functions */
-static inline u32 i2o_msg_get(struct i2o_controller *, struct i2o_message __iomem **);
-extern u32 i2o_msg_get_wait(struct i2o_controller *, struct i2o_message __iomem **,
-			    int);
+static inline u32 i2o_msg_get(struct i2o_controller *,
+			      struct i2o_message __iomem **);
+extern u32 i2o_msg_get_wait(struct i2o_controller *,
+			    struct i2o_message __iomem **, int);
 static inline void i2o_msg_post(struct i2o_controller *, u32);
 static inline int i2o_msg_post_wait(struct i2o_controller *, u32,
				    unsigned long);
@@ -252,15 +257,6 @@ extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
 extern void i2o_msg_nop(struct i2o_controller *, u32);
 static inline void i2o_flush_reply(struct i2o_controller *, u32);
 
-/* DMA handling functions */
-static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t,
-				unsigned int);
-static inline void i2o_dma_free(struct device *, struct i2o_dma *);
-int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int);
-
-static inline int i2o_dma_map(struct device *, struct i2o_dma *);
-static inline void i2o_dma_unmap(struct device *, struct i2o_dma *);
-
 /* IOP functions */
 extern int i2o_status_get(struct i2o_controller *);
 
@@ -285,6 +281,16 @@ static inline u32 i2o_ptr_high(void *ptr)
 {
	return (u32) ((u64) ptr >> 32);
 };
+
+static inline u32 i2o_dma_low(dma_addr_t dma_addr)
+{
+	return (u32) (u64) dma_addr;
+};
+
+static inline u32 i2o_dma_high(dma_addr_t dma_addr)
+{
+	return (u32) ((u64) dma_addr >> 32);
+};
 #else
 static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
 {
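These helpers split a bus address into the two 32-bit words that a 64-bit SGL entry carries; in the 32-bit branch below, the high word is constant zero, so the extra SGL path can be optimized away. Illustrative values, assuming a 64-bit dma_addr_t:

	dma_addr_t addr = 0x123456789ULL;
	u32 lo = i2o_dma_low(addr);	/* 0x23456789 */
	u32 hi = i2o_dma_high(addr);	/* 0x00000001 */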
@@ -315,8 +321,246 @@ static inline u32 i2o_ptr_high(void *ptr)
 {
	return 0;
 };
+
+static inline u32 i2o_dma_low(dma_addr_t dma_addr)
+{
+	return (u32) dma_addr;
+};
+
+static inline u32 i2o_dma_high(dma_addr_t dma_addr)
+{
+	return 0;
+};
 #endif
 
+/**
+ * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
+ * @c: I2O controller for which the calculation should be done
+ * @body_size: maximum body size used for message in 32-bit words.
+ *
+ * Return the maximum number of SG elements in a SG list.
+ */
+static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
+{
+	i2o_status_block *sb = c->status_block.virt;
+	u16 sg_count =
+	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
+	    body_size;
+
+	if (c->pae_support) {
+		/*
+		 * for 64-bit a SG attribute element must be added and each
+		 * SG element needs 12 bytes instead of 8.
+		 */
+		sg_count -= 2;
+		sg_count /= 3;
+	} else
+		sg_count /= 2;
+
+	if (c->short_req && (sg_count > 8))
+		sg_count = 8;
+
+	return sg_count;
+};
+
+/**
+ * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
+ * @c: I2O controller
+ * @ptr: pointer to the data which should be mapped
+ * @size: size of data in bytes
+ * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
+ * @sg_ptr: pointer to the SG list inside the I2O message
+ *
+ * This function does all necessary DMA handling and also writes the I2O
+ * SGL elements into the I2O message. For details on DMA handling see also
+ * dma_map_single(). The pointer sg_ptr will only be set to the end of the
+ * SG list if the allocation was successful.
+ *
+ * Returns DMA address which must be checked for failures using
+ * dma_mapping_error().
+ */
+static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
+					    size_t size,
+					    enum dma_data_direction direction,
+					    u32 __iomem ** sg_ptr)
+{
+	u32 sg_flags;
+	u32 __iomem *mptr = *sg_ptr;
+	dma_addr_t dma_addr;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		sg_flags = 0xd4000000;
+		break;
+	case DMA_FROM_DEVICE:
+		sg_flags = 0xd0000000;
+		break;
+	default:
+		return 0;
+	}
+
+	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
+	if (!dma_mapping_error(dma_addr)) {
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
+			writel(0x7C020002, mptr++);
+			writel(PAGE_SIZE, mptr++);
+		}
+#endif
+
+		writel(sg_flags | size, mptr++);
+		writel(i2o_dma_low(dma_addr), mptr++);
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
+			writel(i2o_dma_high(dma_addr), mptr++);
+#endif
+		*sg_ptr = mptr;
+	}
+	return dma_addr;
+};
+
+/**
+ * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
+ * @c: I2O controller
+ * @sg: SG list to be mapped
+ * @sg_count: number of elements in the SG list
+ * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
+ * @sg_ptr: pointer to the SG list inside the I2O message
+ *
+ * This function does all necessary DMA handling and also writes the I2O
+ * SGL elements into the I2O message. For details on DMA handling see also
+ * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
+ * list if the allocation was successful.
+ *
+ * Returns 0 on failure or 1 on success.
+ */
+static inline int i2o_dma_map_sg(struct i2o_controller *c,
+				 struct scatterlist *sg, int sg_count,
+				 enum dma_data_direction direction,
+				 u32 __iomem ** sg_ptr)
+{
+	u32 sg_flags;
+	u32 __iomem *mptr = *sg_ptr;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		sg_flags = 0x14000000;
+		break;
+	case DMA_FROM_DEVICE:
+		sg_flags = 0x10000000;
+		break;
+	default:
+		return 0;
+	}
+
+	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
+	if (!sg_count)
+		return 0;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
+		writel(0x7C020002, mptr++);
+		writel(PAGE_SIZE, mptr++);
+	}
+#endif
+
+	while (sg_count-- > 0) {
+		if (!sg_count)
+			sg_flags |= 0xC0000000;
+		writel(sg_flags | sg_dma_len(sg), mptr++);
+		writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
+			writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
+#endif
+		sg++;
+	}
+	*sg_ptr = mptr;
+
+	return 1;
+};
+
+/**
+ * i2o_dma_alloc - Allocate DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which should get the DMA buffer
+ * @len: length of the new DMA memory
+ * @gfp_mask: GFP mask
+ *
+ * Allocate a coherent DMA memory and write the pointers into addr.
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
+				size_t len, unsigned int gfp_mask)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int dma_64 = 0;
+
+	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
+		dma_64 = 1;
+		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
+			return -ENOMEM;
+	}
+
+	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
+
+	if ((sizeof(dma_addr_t) > 4) && dma_64)
+		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+			printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
+
+	if (!addr->virt)
+		return -ENOMEM;
+
+	memset(addr->virt, 0, len);
+	addr->len = len;
+
+	return 0;
+};
+
+/**
+ * i2o_dma_free - Free DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which contains the DMA buffer
+ *
+ * Free a coherent DMA memory and set virtual address of addr to NULL.
+ */
+static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
+{
+	if (addr->virt) {
+		if (addr->phys)
+			dma_free_coherent(dev, addr->len, addr->virt,
+					  addr->phys);
+		else
+			kfree(addr->virt);
+		addr->virt = NULL;
+	}
+};
+
+/**
+ * i2o_dma_realloc - Realloc DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: pointer to a i2o_dma struct DMA buffer
+ * @len: new length of memory
+ * @gfp_mask: GFP mask
+ *
+ * If there was something allocated in the addr, free it first. If len > 0
+ * then try to allocate it and write the addresses back to the addr
+ * structure. If len == 0 set the virtual address to NULL.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
+				  size_t len, unsigned int gfp_mask)
+{
+	i2o_dma_free(dev, addr);
+
+	if (len)
+		return i2o_dma_alloc(dev, addr, len, gfp_mask);
+
+	return 0;
+};
+
 /* I2O driver (OSM) functions */
 extern int i2o_driver_register(struct i2o_driver *);
 extern void i2o_driver_unregister(struct i2o_driver *);
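Taken together, the relocated inlines let an OSM size and build a message SGL in one pass: i2o_sg_tablesize() says how many entries fit in a frame, and i2o_dma_map_single()/i2o_dma_map_sg() write the flag and address words while advancing the caller's pointer. A hedged sketch of a caller, assuming 'msg' points into the inbound queue and the SGL starts at body[2] (the layout is illustrative, not from this diff):

	u32 __iomem *mptr = &msg->body[2];	/* assumed SGL offset */
	dma_addr_t addr;

	addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, &mptr);
	if (dma_mapping_error(addr))
		return -ENOMEM;
	/* success: mptr now points just past the SGL element written */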
@@ -385,49 +629,11 @@ extern int i2o_device_claim_release(struct i2o_device *);
 /* Exec OSM functions */
 extern int i2o_exec_lct_get(struct i2o_controller *);
 
-/* device to i2o_device and driver to i2o_driver convertion functions */
+/* device / driver / kobject conversion functions */
 #define to_i2o_driver(drv)	container_of(drv,struct i2o_driver, driver)
 #define to_i2o_device(dev)	container_of(dev, struct i2o_device, device)
-
-/*
- * Messenger inlines
- */
-static inline u32 I2O_POST_READ32(struct i2o_controller *c)
-{
-	rmb();
-	return readl(c->post_port);
-};
-
-static inline void I2O_POST_WRITE32(struct i2o_controller *c, u32 val)
-{
-	wmb();
-	writel(val, c->post_port);
-};
-
-static inline u32 I2O_REPLY_READ32(struct i2o_controller *c)
-{
-	rmb();
-	return readl(c->reply_port);
-};
-
-static inline void I2O_REPLY_WRITE32(struct i2o_controller *c, u32 val)
-{
-	wmb();
-	writel(val, c->reply_port);
-};
-
-static inline u32 I2O_IRQ_READ32(struct i2o_controller *c)
-{
-	rmb();
-	return readl(c->irq_mask);
-};
-
-static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val)
-{
-	wmb();
-	writel(val, c->irq_mask);
-	wmb();
-};
+#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
+#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
 
 /**
  * i2o_msg_get - obtain an I2O message from the IOP
@@ -443,11 +649,11 @@ static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val)
  * available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
  */
 static inline u32 i2o_msg_get(struct i2o_controller *c,
-			      struct i2o_message __iomem **msg)
+			      struct i2o_message __iomem ** msg)
 {
-	u32 m;
+	u32 m = readl(c->in_port);
 
-	if ((m = I2O_POST_READ32(c)) != I2O_QUEUE_EMPTY)
+	if (m != I2O_QUEUE_EMPTY)
		*msg = c->in_queue.virt + m;
 
	return m;
@@ -462,7 +668,7 @@ static inline u32 i2o_msg_get(struct i2o_controller *c,
  */
 static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
 {
-	I2O_POST_WRITE32(c, m);
+	writel(m, c->in_port);
 };
 
 /**
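With the Messenger read/write macros gone, a frame round-trip is a plain readl()/writel() pair on in_port, as the rewritten inlines show. A typical caller might look like this (the message header layout and FIVE_WORD_MSG_SIZE are assumed from the full header, not shown in this diff):

	struct i2o_message __iomem *msg;
	u32 m = i2o_msg_get(c, &msg);	/* readl(c->in_port) underneath */

	if (m == I2O_QUEUE_EMPTY)
		return -EBUSY;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	/* ... fill in target TID, context and body ... */
	i2o_msg_post(c, m);		/* writel(m, c->in_port) */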
@@ -491,12 +697,10 @@ static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m,
  * The I2O controller must be informed that the reply message is not needed
  * anymore. If you forget to flush the reply, the message frame can't be
  * used by the controller anymore and is therefore lost.
- *
- * FIXME: is there a timeout after which the controller reuse the message?
  */
 static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
 {
-	I2O_REPLY_WRITE32(c, m);
+	writel(m, c->out_port);
 };
 
 /**
@@ -530,97 +734,13 @@ static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
  * work for receive side messages as they are kmalloc objects
  * in a different pool.
  */
-static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct i2o_controller *c,
-							     u32 m)
+static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
+							     i2o_controller *c,
+							     u32 m)
 {
	return c->in_queue.virt + m;
 };
 
-/**
- * i2o_dma_alloc - Allocate DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which should get the DMA buffer
- * @len: length of the new DMA memory
- * @gfp_mask: GFP mask
- *
- * Allocate a coherent DMA memory and write the pointers into addr.
- *
- * Returns 0 on success or -ENOMEM on failure.
- */
-static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
-				size_t len, unsigned int gfp_mask)
-{
-	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
-	if (!addr->virt)
-		return -ENOMEM;
-
-	memset(addr->virt, 0, len);
-	addr->len = len;
-
-	return 0;
-};
-
-/**
- * i2o_dma_free - Free DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which contains the DMA buffer
- *
- * Free a coherent DMA memory and set virtual address of addr to NULL.
- */
-static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
-{
-	if (addr->virt) {
-		if (addr->phys)
-			dma_free_coherent(dev, addr->len, addr->virt,
-					  addr->phys);
-		else
-			kfree(addr->virt);
-		addr->virt = NULL;
-	}
-};
-
-/**
- * i2o_dma_map - Map the memory to DMA
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which should be mapped
- *
- * Map the memory in addr->virt to coherent DMA memory and write the
- * physical address into addr->phys.
- *
- * Returns 0 on success or -ENOMEM on failure.
- */
-static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr)
-{
-	if (!addr->virt)
-		return -EFAULT;
-
-	if (!addr->phys)
-		addr->phys = dma_map_single(dev, addr->virt, addr->len,
-					    DMA_BIDIRECTIONAL);
-	if (!addr->phys)
-		return -ENOMEM;
-
-	return 0;
-};
-
-/**
- * i2o_dma_unmap - Unmap the DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which should be unmapped
- *
- * Unmap the memory in addr->virt from DMA memory.
- */
-static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr)
-{
-	if (!addr->virt)
-		return;
-
-	if (addr->phys) {
-		dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL);
-		addr->phys = 0;
-	}
-};
-
 /*
  * Endian handling wrapped into the macro - keeps the core code
  * cleaner.
@@ -773,6 +893,14 @@ extern void i2o_debug_state(struct i2o_controller *c);
 #define I2O_CMD_SCSI_BUSRESET		0x27
 
 /*
+ * Bus Adapter Class
+ */
+#define I2O_CMD_BUS_ADAPTER_RESET	0x85
+#define I2O_CMD_BUS_RESET		0x87
+#define I2O_CMD_BUS_SCAN		0x89
+#define I2O_CMD_BUS_QUIESCE		0x8b
+
+/*
  * Random Block Storage Class
  */
 #define I2O_CMD_BLOCK_READ		0x30
@@ -784,7 +912,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
 #define I2O_CMD_BLOCK_MEJECT		0x43
 #define I2O_CMD_BLOCK_POWER		0x70
 
-#define I2O_PRIVATE_MSG			0xFF
+#define I2O_CMD_PRIVATE			0xFF
 
 /* Command status values */
 
@@ -922,7 +1050,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
 #define I2OVER15	0x0001
 #define I2OVER20	0x0002
 
-/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
+/* Default is 1.5 */
 #define I2OVERSION	I2OVER15
 
 #define SGL_OFFSET_0	I2OVERSION
@@ -933,9 +1061,9 @@ extern void i2o_debug_state(struct i2o_controller *c);
 #define SGL_OFFSET_8	(0x0080 | I2OVERSION)
 #define SGL_OFFSET_9	(0x0090 | I2OVERSION)
 #define SGL_OFFSET_10	(0x00A0 | I2OVERSION)
-
-#define TRL_OFFSET_5	(0x0050 | I2OVERSION)
-#define TRL_OFFSET_6	(0x0060 | I2OVERSION)
+#define SGL_OFFSET_11	(0x00B0 | I2OVERSION)
+#define SGL_OFFSET_12	(0x00C0 | I2OVERSION)
+#define SGL_OFFSET(x)	(((x)<<4) | I2OVERSION)
 
 /* Transaction Reply Lists (TRL) Control Word structure */
 #define TRL_SINGLE_FIXED_LENGTH	0x00
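The new SGL_OFFSET(x) macro generalizes the fixed defines by shifting the SG list offset (in 32-bit words) into the upper nibble of the VersionOffset byte. Worked example: with I2OVERSION = I2OVER15 = 0x0001, SGL_OFFSET(11) = (11 << 4) | 0x0001 = 0x00B1, exactly the value of SGL_OFFSET_11 = 0x00B0 | I2OVERSION.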
@@ -962,17 +1090,13 @@ extern void i2o_debug_state(struct i2o_controller *c);
 #define ELEVEN_WORD_MSG_SIZE	0x000B0000
 #define I2O_MESSAGE_SIZE(x)	((x)<<16)
 
-/* Special TID Assignments */
-
+/* special TID assignments */
 #define ADAPTER_TID		0
 #define HOST_TID		1
 
-#define MSG_FRAME_SIZE		128	/* i2o_scsi assumes >= 32 */
-#define REPLY_FRAME_SIZE	17
-#define SG_TABLESIZE		30
-#define NMBR_MSG_FRAMES		128
-
-#define MSG_POOL_SIZE		(MSG_FRAME_SIZE*NMBR_MSG_FRAMES*sizeof(u32))
+/* outbound queue defines */
+#define I2O_MAX_OUTBOUND_MSG_FRAMES	128
+#define I2O_OUTBOUND_MSG_FRAME_SIZE	128	/* in 32-bit words */
 
 #define I2O_POST_WAIT_OK	0
 #define I2O_POST_WAIT_TIMEOUT	-ETIMEDOUT
@@ -993,11 +1117,10 @@ extern void i2o_debug_state(struct i2o_controller *c);
 #define I2O_HRT_GET_TRIES	3
 #define I2O_LCT_GET_TRIES	3
 
-/* request queue sizes */
+/* defines for max_sectors and max_phys_segments */
 #define I2O_MAX_SECTORS		1024
-#define I2O_MAX_SEGMENTS	128
-
-#define I2O_REQ_MEMPOOL_SIZE	32
+#define I2O_MAX_SECTORS_LIMITED	256
+#define I2O_MAX_PHYS_SEGMENTS	MAX_PHYS_SEGMENTS
 
 #endif /* __KERNEL__ */
 #endif /* _I2O_H */