about | summary | refs | log | tree | commit | diff | stats
path: root/arch/x86/kernel/pci-gart_64.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2008-09-25 06:42:12 -0400
committerIngo Molnar <mingo@elte.hu>2008-09-27 14:13:46 -0400
commit0114267be1bebc2e9c913b19579900153583d617 (patch)
tree20aa3767b3029acad14705349ef3f96a682e053f /arch/x86/kernel/pci-gart_64.c
parent3610f2116e961cdcbd3546a3828470f7aa636212 (diff)
x86/iommu: use __GFP_ZERO instead of memset for GART
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--  arch/x86/kernel/pci-gart_64.c  13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index aecea068f583..d077116fec1b 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -674,13 +674,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	info->aper_size = aper_size >> 20;
 
 	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
-	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
+	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					get_order(gatt_size));
 	if (!gatt)
 		panic("Cannot allocate GATT table");
 	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
 		panic("Could not set GART PTEs to uncacheable pages");
 
-	memset(gatt, 0, gatt_size);
 	agp_gatt_table = gatt;
 
 	enable_gart_translations();
@@ -788,19 +788,16 @@ void __init gart_iommu_init(void)
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
 	iommu_pages = iommu_size >> PAGE_SHIFT;
 
-	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
+	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
 				      get_order(iommu_pages/8));
 	if (!iommu_gart_bitmap)
 		panic("Cannot allocate iommu bitmap\n");
-	memset(iommu_gart_bitmap, 0, iommu_pages/8);
 
 #ifdef CONFIG_IOMMU_LEAK
 	if (leak_trace) {
-		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
+		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
 				  get_order(iommu_pages*sizeof(void *)));
-		if (iommu_leak_tab)
-			memset(iommu_leak_tab, 0, iommu_pages * 8);
-		else
+		if (!iommu_leak_tab)
 			printk(KERN_DEBUG
 			       "PCI-DMA: Cannot allocate leak trace area\n");
 	}