about summary refs log tree commit diff stats
path: root/include/linux/i2o.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/i2o.h')
-rw-r--r-- include/linux/i2o.h | 321
1 files changed, 265 insertions, 56 deletions
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index e8cd11290010..497ea574f96b 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -157,7 +157,8 @@ struct i2o_controller {
157 157
158 void __iomem *in_port; /* Inbound port address */ 158 void __iomem *in_port; /* Inbound port address */
159 void __iomem *out_port; /* Outbound port address */ 159 void __iomem *out_port; /* Outbound port address */
160 void __iomem *irq_mask; /* Interrupt register address */ 160 void __iomem *irq_status; /* Interrupt status register address */
161 void __iomem *irq_mask; /* Interrupt mask register address */
161 162
162 /* Dynamic LCT related data */ 163 /* Dynamic LCT related data */
163 164
@@ -242,15 +243,6 @@ extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
242extern void i2o_msg_nop(struct i2o_controller *, u32); 243extern void i2o_msg_nop(struct i2o_controller *, u32);
243static inline void i2o_flush_reply(struct i2o_controller *, u32); 244static inline void i2o_flush_reply(struct i2o_controller *, u32);
244 245
245/* DMA handling functions */
246static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t,
247 unsigned int);
248static inline void i2o_dma_free(struct device *, struct i2o_dma *);
249int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int);
250
251static inline int i2o_dma_map(struct device *, struct i2o_dma *);
252static inline void i2o_dma_unmap(struct device *, struct i2o_dma *);
253
254/* IOP functions */ 246/* IOP functions */
255extern int i2o_status_get(struct i2o_controller *); 247extern int i2o_status_get(struct i2o_controller *);
256 248
@@ -275,6 +267,16 @@ static inline u32 i2o_ptr_high(void *ptr)
275{ 267{
276 return (u32) ((u64) ptr >> 32); 268 return (u32) ((u64) ptr >> 32);
277}; 269};
270
271static inline u32 i2o_dma_low(dma_addr_t dma_addr)
272{
273 return (u32) (u64) dma_addr;
274};
275
276static inline u32 i2o_dma_high(dma_addr_t dma_addr)
277{
278 return (u32) ((u64) dma_addr >> 32);
279};
278#else 280#else
279static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr) 281static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
280{ 282{
@@ -305,8 +307,246 @@ static inline u32 i2o_ptr_high(void *ptr)
305{ 307{
306 return 0; 308 return 0;
307}; 309};
310
311static inline u32 i2o_dma_low(dma_addr_t dma_addr)
312{
313 return (u32) dma_addr;
314};
315
316static inline u32 i2o_dma_high(dma_addr_t dma_addr)
317{
318 return 0;
319};
320#endif
321
322/**
323 * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
324 * @c: I2O controller for which the calculation should be done
325 * @body_size: maximum body size used for message in 32-bit words.
326 *
327 * Return the maximum number of SG elements in a SG list.
328 */
329static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
330{
331 i2o_status_block *sb = c->status_block.virt;
332 u16 sg_count =
333 (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
334 body_size;
335
336 if (c->pae_support) {
337 /*
338 * for 64-bit a SG attribute element must be added and each
339 * SG element needs 12 bytes instead of 8.
340 */
341 sg_count -= 2;
342 sg_count /= 3;
343 } else
344 sg_count /= 2;
345
346 if (c->short_req && (sg_count > 8))
347 sg_count = 8;
348
349 return sg_count;
350};
351
352/**
353 * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
354 * @c: I2O controller
355 * @ptr: pointer to the data which should be mapped
356 * @size: size of data in bytes
357 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
358 * @sg_ptr: pointer to the SG list inside the I2O message
359 *
360 * This function does all necessary DMA handling and also writes the I2O
361 * SGL elements into the I2O message. For details on DMA handling see also
362 * dma_map_single(). The pointer sg_ptr will only be set to the end of the
363 * SG list if the allocation was successful.
364 *
365 * Returns DMA address which must be checked for failures using
366 * dma_mapping_error().
367 */
368static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
369 size_t size,
370 enum dma_data_direction direction,
371 u32 __iomem ** sg_ptr)
372{
373 u32 sg_flags;
374 u32 __iomem *mptr = *sg_ptr;
375 dma_addr_t dma_addr;
376
377 switch (direction) {
378 case DMA_TO_DEVICE:
379 sg_flags = 0xd4000000;
380 break;
381 case DMA_FROM_DEVICE:
382 sg_flags = 0xd0000000;
383 break;
384 default:
385 return 0;
386 }
387
388 dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
389 if (!dma_mapping_error(dma_addr)) {
390#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
391 if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
392 writel(0x7C020002, mptr++);
393 writel(PAGE_SIZE, mptr++);
394 }
395#endif
396
397 writel(sg_flags | size, mptr++);
398 writel(i2o_dma_low(dma_addr), mptr++);
399#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
400 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
401 writel(i2o_dma_high(dma_addr), mptr++);
402#endif
403 *sg_ptr = mptr;
404 }
405 return dma_addr;
406};
407
408/**
409 * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
410 * @c: I2O controller
411 * @sg: SG list to be mapped
412 * @sg_count: number of elements in the SG list
413 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
414 * @sg_ptr: pointer to the SG list inside the I2O message
415 *
416 * This function does all necessary DMA handling and also writes the I2O
417 * SGL elements into the I2O message. For details on DMA handling see also
418 * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
419 * list if the allocation was successful.
420 *
421 * Returns 0 on failure or 1 on success.
422 */
423static inline int i2o_dma_map_sg(struct i2o_controller *c,
424 struct scatterlist *sg, int sg_count,
425 enum dma_data_direction direction,
426 u32 __iomem ** sg_ptr)
427{
428 u32 sg_flags;
429 u32 __iomem *mptr = *sg_ptr;
430
431 switch (direction) {
432 case DMA_TO_DEVICE:
433 sg_flags = 0x14000000;
434 break;
435 case DMA_FROM_DEVICE:
436 sg_flags = 0x10000000;
437 break;
438 default:
439 return 0;
440 }
441
442 sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
443 if (!sg_count)
444 return 0;
445
446#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
447 if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
448 writel(0x7C020002, mptr++);
449 writel(PAGE_SIZE, mptr++);
450 }
308#endif 451#endif
309 452
453 while (sg_count-- > 0) {
454 if (!sg_count)
455 sg_flags |= 0xC0000000;
456 writel(sg_flags | sg_dma_len(sg), mptr++);
457 writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
458#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
459 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
460 writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
461#endif
462 sg++;
463 }
464 *sg_ptr = mptr;
465
466 return 1;
467};
468
469/**
470 * i2o_dma_alloc - Allocate DMA memory
471 * @dev: struct device pointer to the PCI device of the I2O controller
472 * @addr: i2o_dma struct which should get the DMA buffer
473 * @len: length of the new DMA memory
474 * @gfp_mask: GFP mask
475 *
476 * Allocate a coherent DMA memory and write the pointers into addr.
477 *
478 * Returns 0 on success or -ENOMEM on failure.
479 */
480static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
481 size_t len, unsigned int gfp_mask)
482{
483 struct pci_dev *pdev = to_pci_dev(dev);
484 int dma_64 = 0;
485
486 if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
487 dma_64 = 1;
488 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
489 return -ENOMEM;
490 }
491
492 addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
493
494 if ((sizeof(dma_addr_t) > 4) && dma_64)
495 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
496 printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
497
498 if (!addr->virt)
499 return -ENOMEM;
500
501 memset(addr->virt, 0, len);
502 addr->len = len;
503
504 return 0;
505};
506
507/**
508 * i2o_dma_free - Free DMA memory
509 * @dev: struct device pointer to the PCI device of the I2O controller
510 * @addr: i2o_dma struct which contains the DMA buffer
511 *
512 * Free a coherent DMA memory and set virtual address of addr to NULL.
513 */
514static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
515{
516 if (addr->virt) {
517 if (addr->phys)
518 dma_free_coherent(dev, addr->len, addr->virt,
519 addr->phys);
520 else
521 kfree(addr->virt);
522 addr->virt = NULL;
523 }
524};
525
526/**
527 * i2o_dma_realloc - Realloc DMA memory
528 * @dev: struct device pointer to the PCI device of the I2O controller
529 * @addr: pointer to a i2o_dma struct DMA buffer
530 * @len: new length of memory
531 * @gfp_mask: GFP mask
532 *
533 * If there was something allocated in the addr, free it first. If len > 0
534 * then try to allocate it and write the addresses back to the addr
535 * structure. If len == 0 set the virtual address to NULL.
536 *
537 * Returns 0 on success or negative error code on failure.
538 */
539static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
540 size_t len, unsigned int gfp_mask)
541{
542 i2o_dma_free(dev, addr);
543
544 if (len)
545 return i2o_dma_alloc(dev, addr, len, gfp_mask);
546
547 return 0;
548};
549
310/* I2O driver (OSM) functions */ 550/* I2O driver (OSM) functions */
311extern int i2o_driver_register(struct i2o_driver *); 551extern int i2o_driver_register(struct i2o_driver *);
312extern void i2o_driver_unregister(struct i2o_driver *); 552extern void i2o_driver_unregister(struct i2o_driver *);
@@ -375,10 +615,11 @@ extern int i2o_device_claim_release(struct i2o_device *);
375/* Exec OSM functions */ 615/* Exec OSM functions */
376extern int i2o_exec_lct_get(struct i2o_controller *); 616extern int i2o_exec_lct_get(struct i2o_controller *);
377 617
378/* device / driver conversion functions */ 618/* device / driver / kobject conversion functions */
379#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) 619#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
380#define to_i2o_device(dev) container_of(dev, struct i2o_device, device) 620#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
381#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) 621#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
622#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
382 623
383/** 624/**
384 * i2o_msg_get - obtain an I2O message from the IOP 625 * i2o_msg_get - obtain an I2O message from the IOP
@@ -466,8 +707,10 @@ static inline struct i2o_message __iomem *i2o_msg_out_to_virt(struct
466 i2o_controller *c, 707 i2o_controller *c,
467 u32 m) 708 u32 m)
468{ 709{
469 BUG_ON(m < c->out_queue.phys 710 if (unlikely
470 || m >= c->out_queue.phys + c->out_queue.len); 711 (m < c->out_queue.phys
712 || m >= c->out_queue.phys + c->out_queue.len))
713 return NULL;
471 714
472 return c->out_queue.virt + (m - c->out_queue.phys); 715 return c->out_queue.virt + (m - c->out_queue.phys);
473}; 716};
@@ -532,48 +775,6 @@ static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
532 } 775 }
533}; 776};
534 777
535/**
536 * i2o_dma_map - Map the memory to DMA
537 * @dev: struct device pointer to the PCI device of the I2O controller
538 * @addr: i2o_dma struct which should be mapped
539 *
540 * Map the memory in addr->virt to coherent DMA memory and write the
541 * physical address into addr->phys.
542 *
543 * Returns 0 on success or -ENOMEM on failure.
544 */
545static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr)
546{
547 if (!addr->virt)
548 return -EFAULT;
549
550 if (!addr->phys)
551 addr->phys = dma_map_single(dev, addr->virt, addr->len,
552 DMA_BIDIRECTIONAL);
553 if (!addr->phys)
554 return -ENOMEM;
555
556 return 0;
557};
558
559/**
560 * i2o_dma_unmap - Unmap the DMA memory
561 * @dev: struct device pointer to the PCI device of the I2O controller
562 * @addr: i2o_dma struct which should be unmapped
563 *
564 * Unmap the memory in addr->virt from DMA memory.
565 */
566static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr)
567{
568 if (!addr->virt)
569 return;
570
571 if (addr->phys) {
572 dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL);
573 addr->phys = 0;
574 }
575};
576
577/* 778/*
578 * Endian handling wrapped into the macro - keeps the core code 779 * Endian handling wrapped into the macro - keeps the core code
579 * cleaner. 780 * cleaner.
@@ -726,6 +927,14 @@ extern void i2o_debug_state(struct i2o_controller *c);
726#define I2O_CMD_SCSI_BUSRESET 0x27 927#define I2O_CMD_SCSI_BUSRESET 0x27
727 928
728/* 929/*
930 * Bus Adapter Class
931 */
932#define I2O_CMD_BUS_ADAPTER_RESET 0x85
933#define I2O_CMD_BUS_RESET 0x87
934#define I2O_CMD_BUS_SCAN 0x89
935#define I2O_CMD_BUS_QUIESCE 0x8b
936
937/*
729 * Random Block Storage Class 938 * Random Block Storage Class
730 */ 939 */
731#define I2O_CMD_BLOCK_READ 0x30 940#define I2O_CMD_BLOCK_READ 0x30
@@ -948,7 +1157,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
948 1157
949/* request queue sizes */ 1158/* request queue sizes */
950#define I2O_MAX_SECTORS 1024 1159#define I2O_MAX_SECTORS 1024
951#define I2O_MAX_SEGMENTS 128 1160#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
952 1161
953#define I2O_REQ_MEMPOOL_SIZE 32 1162#define I2O_REQ_MEMPOOL_SIZE 32
954 1163