diff options
| author | Ingo Molnar <mingo@elte.hu> | 2009-09-07 02:19:51 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-09-07 02:19:51 -0400 |
| commit | a1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch) | |
| tree | 0f1777542b385ebefd30b3586d830fd8ed6fda5b /arch/x86/kernel/amd_iommu.c | |
| parent | 75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff) | |
| parent | d28daf923ac5e4a0d7cecebae56f3e339189366b (diff) | |
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
arch/Kconfig
kernel/trace/trace.h
Merge reason: resolve the conflicts, plus adapt to the new
ring-buffer APIs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
| -rw-r--r-- | arch/x86/kernel/amd_iommu.c | 20 |
1 files changed, 18 insertions, 2 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 1c60554537c3..6c99f5037801 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
| @@ -434,6 +434,16 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid) | |||
| 434 | iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1); | 434 | iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1); |
| 435 | } | 435 | } |
| 436 | 436 | ||
| 437 | /* Flush the whole IO/TLB for a given protection domain - including PDE */ | ||
| 438 | static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) | ||
| 439 | { | ||
| 440 | u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | ||
| 441 | |||
| 442 | INC_STATS_COUNTER(domain_flush_single); | ||
| 443 | |||
| 444 | iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1); | ||
| 445 | } | ||
| 446 | |||
| 437 | /* | 447 | /* |
| 438 | * This function is used to flush the IO/TLB for a given protection domain | 448 | * This function is used to flush the IO/TLB for a given protection domain |
| 439 | * on every IOMMU in the system | 449 | * on every IOMMU in the system |
| @@ -1078,7 +1088,13 @@ static void attach_device(struct amd_iommu *iommu, | |||
| 1078 | amd_iommu_pd_table[devid] = domain; | 1088 | amd_iommu_pd_table[devid] = domain; |
| 1079 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1089 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
| 1080 | 1090 | ||
| 1091 | /* | ||
| 1092 | * We might boot into a crash-kernel here. The crashed kernel | ||
| 1093 | * left the caches in the IOMMU dirty. So we have to flush | ||
| 1094 | * here to evict all dirty stuff. | ||
| 1095 | */ | ||
| 1081 | iommu_queue_inv_dev_entry(iommu, devid); | 1096 | iommu_queue_inv_dev_entry(iommu, devid); |
| 1097 | iommu_flush_tlb_pde(iommu, domain->id); | ||
| 1082 | } | 1098 | } |
| 1083 | 1099 | ||
| 1084 | /* | 1100 | /* |
| @@ -1176,7 +1192,7 @@ out: | |||
| 1176 | return 0; | 1192 | return 0; |
| 1177 | } | 1193 | } |
| 1178 | 1194 | ||
| 1179 | struct notifier_block device_nb = { | 1195 | static struct notifier_block device_nb = { |
| 1180 | .notifier_call = device_change_notifier, | 1196 | .notifier_call = device_change_notifier, |
| 1181 | }; | 1197 | }; |
| 1182 | 1198 | ||
| @@ -1747,7 +1763,7 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
| 1747 | flag |= __GFP_ZERO; | 1763 | flag |= __GFP_ZERO; |
| 1748 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); | 1764 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); |
| 1749 | if (!virt_addr) | 1765 | if (!virt_addr) |
| 1750 | return 0; | 1766 | return NULL; |
| 1751 | 1767 | ||
| 1752 | paddr = virt_to_phys(virt_addr); | 1768 | paddr = virt_to_phys(virt_addr); |
| 1753 | 1769 | ||
