author | Joerg Roedel <joerg.roedel@amd.com> | 2008-09-04 09:04:26 -0400
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-09-19 06:59:03 -0400
commit | dbcc112e3b5367e81a845b082933506b0ff1d1e2 |
tree | b5537684c5ea8d354d5e94b358741f4db3e05d7c /arch |
parent | 07a2c01a0c2a0cb4581a67d50d4f17cb4d2457c4 |
AMD IOMMU: check for invalid device pointers
Currently the AMD IOMMU code triggers a BUG_ON if NULL is passed as the
device. This is inconsistent with other IOMMU implementations, so check for
invalid device pointers in the dma_ops functions and fail gracefully instead.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
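
The pattern the patch introduces is a single validity check that every dma_ops entry point runs before dereferencing the device. The sketch below restates it outside the diff; check_device() mirrors the helper added to arch/x86/kernel/amd_iommu.c, while map_single_example() and its body are illustrative only and not part of the patch.

```c
#include <linux/device.h>	/* struct device */
#include <linux/dma-mapping.h>	/* dma_addr_t */

extern dma_addr_t bad_dma_address;	/* error cookie defined elsewhere in the driver */

/*
 * A device is only usable by the dma_ops path if the pointer itself and
 * its dma_mask are valid (mirrors the helper added by this patch).
 */
static bool check_device(struct device *dev)
{
	if (!dev || !dev->dma_mask)
		return false;

	return true;
}

/*
 * Illustrative caller: fail the mapping gracefully instead of BUG_ON,
 * the same way map_single()/map_sg()/alloc_coherent() do below.
 */
static dma_addr_t map_single_example(struct device *dev)
{
	if (!check_device(dev))
		return bad_dma_address;

	/* ... normal lookup of IOMMU, protection domain and requestor id ... */
	return 0;
}
```

Mapping functions return an error value (bad_dma_address, 0 or NULL), while the unmap and free paths simply return, since there is nothing to undo for a device the IOMMU never handled.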
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 43 |
1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 01c68c38840d..695e0fc41b10 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -646,6 +646,18 @@ static void set_device_domain(struct amd_iommu *iommu,
  *****************************************************************************/
 
 /*
+ * This function checks if the driver got a valid device from the caller to
+ * avoid dereferencing invalid pointers.
+ */
+static bool check_device(struct device *dev)
+{
+	if (!dev || !dev->dma_mask)
+		return false;
+
+	return true;
+}
+
+/*
  * In the dma_ops path we only have the struct device. This function
  * finds the corresponding IOMMU, the protection domain and the
  * requestor id for a given device.
@@ -661,18 +673,19 @@ static int get_device_resources(struct device *dev,
 	struct pci_dev *pcidev;
 	u16 _bdf;
 
-	BUG_ON(!dev || dev->bus != &pci_bus_type || !dev->dma_mask);
+	*iommu = NULL;
+	*domain = NULL;
+	*bdf = 0xffff;
+
+	if (dev->bus != &pci_bus_type)
+		return 0;
 
 	pcidev = to_pci_dev(dev);
 	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
 
 	/* device not translated by any IOMMU in the system? */
-	if (_bdf > amd_iommu_last_bdf) {
-		*iommu = NULL;
-		*domain = NULL;
-		*bdf = 0xffff;
+	if (_bdf > amd_iommu_last_bdf)
 		return 0;
-	}
 
 	*bdf = amd_iommu_alias_table[_bdf];
 
@@ -826,6 +839,9 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	u16 devid;
 	dma_addr_t addr;
 
+	if (!check_device(dev))
+		return bad_dma_address;
+
 	get_device_resources(dev, &iommu, &domain, &devid);
 
 	if (iommu == NULL || domain == NULL)
@@ -860,7 +876,8 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 	struct protection_domain *domain;
 	u16 devid;
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!check_device(dev) ||
+	    !get_device_resources(dev, &iommu, &domain, &devid))
 		/* device not handled by any AMD IOMMU */
 		return;
 
@@ -910,6 +927,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	phys_addr_t paddr;
 	int mapped_elems = 0;
 
+	if (!check_device(dev))
+		return 0;
+
 	get_device_resources(dev, &iommu, &domain, &devid);
 
 	if (!iommu || !domain)
@@ -967,7 +987,8 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	u16 devid;
 	int i;
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!check_device(dev) ||
+	    !get_device_resources(dev, &iommu, &domain, &devid))
 		return;
 
 	spin_lock_irqsave(&domain->lock, flags);
@@ -999,6 +1020,9 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	u16 devid;
 	phys_addr_t paddr;
 
+	if (!check_device(dev))
+		return NULL;
+
 	virt_addr = (void *)__get_free_pages(flag, get_order(size));
 	if (!virt_addr)
 		return 0;
@@ -1047,6 +1071,9 @@ static void free_coherent(struct device *dev, size_t size,
 	struct protection_domain *domain;
 	u16 devid;
 
+	if (!check_device(dev))
+		return;
+
 	get_device_resources(dev, &iommu, &domain, &devid);
 
 	if (!iommu || !domain)
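
From a driver's point of view, the visible change is that a mapping request on a device the IOMMU cannot handle now fails instead of crashing the kernel. A hedged sketch of that caller-side behaviour follows; the function, buffer, and error handling are hypothetical and not part of the patch, and dma_mapping_error() is shown with the two-argument form used around 2.6.27 and later.

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical driver path: with this patch, a NULL device, a device
 * without a dma_mask, or a non-PCI device makes the mapping request
 * return an error value rather than triggering BUG_ON in the AMD IOMMU
 * dma_ops.
 */
static int example_map(struct device *dev, void *buf, size_t size)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;	/* mapping refused, handle it gracefully */

	/* ... program the hardware with "handle" ... */

	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
	return 0;
}
```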