path: root/include/linux/i2o.h
author    Alan Cox <alan@redhat.com>  2008-10-16 01:02:47 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-16 14:21:38 -0400
commit    9d793b0bcbbbc37d80241862dfa5257963d5415e (patch)
tree      b842e5e92825d85fae63afadf4fcb4c1a681c28c /include/linux/i2o.h
parent    673c0c00382ed807f09d94e806f3519ddeeb4f70 (diff)
i2o: Fix 32/64bit DMA locking
The I2O ioctls assume 32bits. In itself that is fine as they are old
cards and nobody uses 64bit. However on LKML it was noted this
assumption is also made for allocated memory and is unsafe on 64bit
systems.

Fixing this is a mess. It turns out there is tons of crap buried in a
header file that does racy 32/64bit filtering on the masks. So we:

- Verify all callers of the racy code can sleep (i2o_dma_[re]alloc)
- Move the code into a new i2o/memory.c file
- Remove the gfp_mask argument so nobody can try and misuse the function
- Wrap a mutex around the problem area (a single mutex is easy to do and
  none of this is performance relevant)
- Switch the remaining problem kmalloc holdout to use i2o_dma_alloc

Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: Vasily Averin <vvs@sw.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
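For illustration, a minimal sketch of the shape the serialized allocator can take once moved out of line into memory.c. The mem_lock name and the exact body are assumptions for this sketch, not necessarily the code the patch adds; it combines the old inline i2o_dma_alloc() with the changes described above (mutex, no gfp_mask):

static DEFINE_MUTEX(mem_lock);	/* assumed name; serializes mask flipping */

int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int dma_64 = 0;

	mutex_lock(&mem_lock);	/* close the 32/64bit mask race */
	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
		dma_64 = 1;
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			mutex_unlock(&mem_lock);
			return -ENOMEM;
		}
	}

	/* All callers were verified to sleep, so GFP_KERNEL replaces the
	 * removed gfp_mask argument. */
	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);

	if ((sizeof(dma_addr_t) > 4) && dma_64)
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
			printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
	mutex_unlock(&mem_lock);

	if (!addr->virt)
		return -ENOMEM;

	memset(addr->virt, 0, len);
	addr->len = len;

	return 0;
}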
Diffstat (limited to 'include/linux/i2o.h')
-rw-r--r--  include/linux/i2o.h | 292
1 file changed, 12 insertions, 280 deletions
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 75ae6d8aba4..4c4e57d1f19 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -570,7 +570,6 @@ struct i2o_controller {
 #endif
 	spinlock_t lock;	/* lock for controller
 				   configuration */
-
 	void *driver_data[I2O_MAX_DRIVERS];	/* storage for drivers */
 };
 
@@ -691,289 +690,22 @@ static inline u32 i2o_dma_high(dma_addr_t dma_addr)
 };
 #endif
 
-/**
- * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
- * @c: I2O controller for which the calculation should be done
- * @body_size: maximum body size used for message in 32-bit words.
- *
- * Return the maximum number of SG elements in a SG list.
- */
-static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
-{
-	i2o_status_block *sb = c->status_block.virt;
-	u16 sg_count =
-	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
-	    body_size;
-
-	if (c->pae_support) {
-		/*
-		 * for 64-bit a SG attribute element must be added and each
-		 * SG element needs 12 bytes instead of 8.
-		 */
-		sg_count -= 2;
-		sg_count /= 3;
-	} else
-		sg_count /= 2;
-
-	if (c->short_req && (sg_count > 8))
-		sg_count = 8;
-
-	return sg_count;
-};
-
-/**
- * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
- * @c: I2O controller
- * @ptr: pointer to the data which should be mapped
- * @size: size of data in bytes
- * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
- * @sg_ptr: pointer to the SG list inside the I2O message
- *
- * This function does all necessary DMA handling and also writes the I2O
- * SGL elements into the I2O message. For details on DMA handling see also
- * dma_map_single(). The pointer sg_ptr will only be set to the end of the
- * SG list if the allocation was successful.
- *
- * Returns DMA address which must be checked for failures using
- * dma_mapping_error().
- */
-static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
+extern u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size);
+extern dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
 					    size_t size,
 					    enum dma_data_direction direction,
-					    u32 ** sg_ptr)
-{
-	u32 sg_flags;
-	u32 *mptr = *sg_ptr;
-	dma_addr_t dma_addr;
-
-	switch (direction) {
-	case DMA_TO_DEVICE:
-		sg_flags = 0xd4000000;
-		break;
-	case DMA_FROM_DEVICE:
-		sg_flags = 0xd0000000;
-		break;
-	default:
-		return 0;
-	}
-
-	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
-	if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
-		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
-			*mptr++ = cpu_to_le32(0x7C020002);
-			*mptr++ = cpu_to_le32(PAGE_SIZE);
-		}
-#endif
-
-		*mptr++ = cpu_to_le32(sg_flags | size);
-		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
-		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
-			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
-#endif
-		*sg_ptr = mptr;
-	}
-	return dma_addr;
-};
-
-/**
- * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
- * @c: I2O controller
- * @sg: SG list to be mapped
- * @sg_count: number of elements in the SG list
- * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
- * @sg_ptr: pointer to the SG list inside the I2O message
- *
- * This function does all necessary DMA handling and also writes the I2O
- * SGL elements into the I2O message. For details on DMA handling see also
- * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
- * list if the allocation was successful.
- *
- * Returns 0 on failure or 1 on success.
- */
-static inline int i2o_dma_map_sg(struct i2o_controller *c,
+					    u32 ** sg_ptr);
+extern int i2o_dma_map_sg(struct i2o_controller *c,
 				 struct scatterlist *sg, int sg_count,
 				 enum dma_data_direction direction,
-				 u32 ** sg_ptr)
-{
-	u32 sg_flags;
-	u32 *mptr = *sg_ptr;
-
-	switch (direction) {
-	case DMA_TO_DEVICE:
-		sg_flags = 0x14000000;
-		break;
-	case DMA_FROM_DEVICE:
-		sg_flags = 0x10000000;
-		break;
-	default:
-		return 0;
-	}
-
-	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
-	if (!sg_count)
-		return 0;
-
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
-	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
-		*mptr++ = cpu_to_le32(0x7C020002);
-		*mptr++ = cpu_to_le32(PAGE_SIZE);
-	}
-#endif
-
-	while (sg_count-- > 0) {
-		if (!sg_count)
-			sg_flags |= 0xC0000000;
-		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
-		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
-		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
-			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
-#endif
-		sg = sg_next(sg);
-	}
-	*sg_ptr = mptr;
-
-	return 1;
-};
-
-/**
- * i2o_dma_alloc - Allocate DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which should get the DMA buffer
- * @len: length of the new DMA memory
- * @gfp_mask: GFP mask
- *
- * Allocate a coherent DMA memory and write the pointers into addr.
- *
- * Returns 0 on success or -ENOMEM on failure.
- */
-static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
-				size_t len, gfp_t gfp_mask)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	int dma_64 = 0;
-
-	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
-		dma_64 = 1;
-		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
-			return -ENOMEM;
-	}
-
-	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
-
-	if ((sizeof(dma_addr_t) > 4) && dma_64)
-		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
-			printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
-
-	if (!addr->virt)
-		return -ENOMEM;
-
-	memset(addr->virt, 0, len);
-	addr->len = len;
-
-	return 0;
-};
-
-/**
- * i2o_dma_free - Free DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which contains the DMA buffer
- *
- * Free a coherent DMA memory and set virtual address of addr to NULL.
- */
-static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
-{
-	if (addr->virt) {
-		if (addr->phys)
-			dma_free_coherent(dev, addr->len, addr->virt,
-					  addr->phys);
-		else
-			kfree(addr->virt);
-		addr->virt = NULL;
-	}
-};
-
-/**
- * i2o_dma_realloc - Realloc DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: pointer to a i2o_dma struct DMA buffer
- * @len: new length of memory
- * @gfp_mask: GFP mask
- *
- * If there was something allocated in the addr, free it first. If len > 0
- * than try to allocate it and write the addresses back to the addr
- * structure. If len == 0 set the virtual address to NULL.
- *
- * Returns the 0 on success or negative error code on failure.
- */
-static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
-				  size_t len, gfp_t gfp_mask)
-{
-	i2o_dma_free(dev, addr);
-
-	if (len)
-		return i2o_dma_alloc(dev, addr, len, gfp_mask);
-
-	return 0;
-};
-
-/*
- * i2o_pool_alloc - Allocate an slab cache and mempool
- * @mempool: pointer to struct i2o_pool to write data into.
- * @name: name which is used to identify cache
- * @size: size of each object
- * @min_nr: minimum number of objects
- *
- * First allocates a slab cache with name and size. Then allocates a
- * mempool which uses the slab cache for allocation and freeing.
- *
- * Returns 0 on success or negative error code on failure.
- */
-static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
-				 size_t size, int min_nr)
-{
-	pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
-	if (!pool->name)
-		goto exit;
-	strcpy(pool->name, name);
-
-	pool->slab =
-	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!pool->slab)
-		goto free_name;
-
-	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
-	if (!pool->mempool)
-		goto free_slab;
-
-	return 0;
-
-      free_slab:
-	kmem_cache_destroy(pool->slab);
-
-      free_name:
-	kfree(pool->name);
-
-      exit:
-	return -ENOMEM;
-};
-
-/*
- * i2o_pool_free - Free slab cache and mempool again
- * @mempool: pointer to struct i2o_pool which should be freed
- *
- * Note that you have to return all objects to the mempool again before
- * calling i2o_pool_free().
- */
-static inline void i2o_pool_free(struct i2o_pool *pool)
-{
-	mempool_destroy(pool->mempool);
-	kmem_cache_destroy(pool->slab);
-	kfree(pool->name);
-};
-
+				 u32 ** sg_ptr);
+extern int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len);
+extern void i2o_dma_free(struct device *dev, struct i2o_dma *addr);
+extern int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
+			   size_t len);
+extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+			  size_t size, int min_nr);
+extern void i2o_pool_free(struct i2o_pool *pool);
 /* I2O driver (OSM) functions */
 extern int i2o_driver_register(struct i2o_driver *);
 extern void i2o_driver_unregister(struct i2o_driver *);
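
Call sites lose the gfp_mask argument to match the new prototypes; a hypothetical caller (variable names invented for illustration) changes roughly like this:

	struct i2o_dma buf;

	/* Before this patch: the caller supplied a GFP mask. */
	if (i2o_dma_alloc(dev, &buf, len, GFP_KERNEL))
		return -ENOMEM;

	/* After: i2o_dma_alloc() may sleep and uses GFP_KERNEL internally. */
	if (i2o_dma_alloc(dev, &buf, len))
		return -ENOMEM;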