Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/amd_iommu.c                | 36
-rw-r--r--  arch/x86/kernel/apic.c                     |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  | 18
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h  | 17
-rw-r--r--  arch/x86/kernel/io_apic.c                  | 26
-rw-r--r--  arch/x86/kernel/kvmclock.c                 |  2
6 files changed, 70 insertions, 31 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e4899e0e8787..5662e226b0c9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
 		print_devid(_bdf, 1);
 	}
 
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
 	return 1;
 }
 
@@ -1031,8 +1033,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1061,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1127,8 +1127,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1172,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1225,8 +1223,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1254,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
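The amd_iommu.c changes centralize the need_sync bookkeeping: the flag is now set inside iommu_queue_command() whenever a command is queued successfully, and iommu_completion_wait() checks and clears it under the lock before issuing the COMPLETION_WAIT command, so callers may invoke it unconditionally. Below is a standalone, simplified sketch of that pattern, not the kernel code: the toy_iommu type, the pthread mutex (standing in for the IOMMU spinlock) and the toy_queue_raw() helper are illustrative stand-ins.

/* Simplified model of the need_sync batching pattern; not the kernel's API. */
#include <pthread.h>
#include <stdbool.h>

struct toy_iommu {
	pthread_mutex_t lock;	/* stands in for iommu->lock (a spinlock) */
	bool need_sync;		/* "a command was queued since the last sync" */
};

/* stand-in for __iommu_queue_command(); pretend queueing always succeeds */
static int toy_queue_raw(struct toy_iommu *iommu, int cmd)
{
	(void)iommu;
	(void)cmd;
	return 0;
}

static int toy_queue_command(struct toy_iommu *iommu, int cmd)
{
	int ret;

	pthread_mutex_lock(&iommu->lock);
	ret = toy_queue_raw(iommu, cmd);
	if (!ret)
		iommu->need_sync = true;	/* remember a sync is pending */
	pthread_mutex_unlock(&iommu->lock);

	return ret;
}

static int toy_completion_wait(struct toy_iommu *iommu)
{
	int ret = 0;

	pthread_mutex_lock(&iommu->lock);
	if (!iommu->need_sync)			/* nothing queued: skip the wait */
		goto out;
	iommu->need_sync = false;
	ret = toy_queue_raw(iommu, /* COMPL_WAIT */ 1);
out:
	pthread_mutex_unlock(&iommu->lock);
	return ret;
}

With the flag handled in those two places, the DMA-API paths can call the completion wait unconditionally, which is why the unlikely(iommu->need_sync) checks disappear from map_single(), map_sg() and the other callers above.
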
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 04a7f960bbc0..16f94879b525 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1315,7 +1315,7 @@ void enable_x2apic(void)
 	}
 }
 
-void enable_IR_x2apic(void)
+void __init enable_IR_x2apic(void)
 {
 #ifdef CONFIG_INTR_REMAP
 	int ret;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index d3dcd58b87cd..7f05f44b97e9 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -115,9 +115,20 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
 	u32 i = 0;
 
 	if (cpu_family == CPU_HW_PSTATE) {
-		rdmsr(MSR_PSTATE_STATUS, lo, hi);
-		i = lo & HW_PSTATE_MASK;
-		data->currpstate = i;
+		if (data->currpstate == HW_PSTATE_INVALID) {
+			/* read (initial) hw pstate if not yet set */
+			rdmsr(MSR_PSTATE_STATUS, lo, hi);
+			i = lo & HW_PSTATE_MASK;
+
+			/*
+			 * a workaround for family 11h erratum 311 might cause
+			 * an "out-of-range Pstate if the core is in Pstate-0
+			 */
+			if (i >= data->numps)
+				data->currpstate = HW_PSTATE_0;
+			else
+				data->currpstate = i;
+		}
 		return 0;
 	}
 	do {
@@ -1121,6 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	}
 
 	data->cpu = pol->cpu;
+	data->currpstate = HW_PSTATE_INVALID;
 
 	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index ab48cfed4d96..65cfb5d7f77f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -5,6 +5,19 @@
  * http://www.gnu.org/licenses/gpl.html
  */
 
+
+enum pstate {
+	HW_PSTATE_INVALID = 0xff,
+	HW_PSTATE_0 = 0,
+	HW_PSTATE_1 = 1,
+	HW_PSTATE_2 = 2,
+	HW_PSTATE_3 = 3,
+	HW_PSTATE_4 = 4,
+	HW_PSTATE_5 = 5,
+	HW_PSTATE_6 = 6,
+	HW_PSTATE_7 = 7,
+};
+
 struct powernow_k8_data {
 	unsigned int cpu;
 
@@ -23,7 +36,9 @@ struct powernow_k8_data {
 	u32 exttype; /* extended interface = 1 */
 
 	/* keep track of the current fid / vid or pstate */
-	u32 currvid, currfid, currpstate;
+	u32 currvid;
+	u32 currfid;
+	enum pstate currpstate;
 
 	/* the powernow_table includes all frequency and vid/fid pairings:
 	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
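The new HW_PSTATE_INVALID sentinel and the clamp in query_current_values_with_pending_wait() work together: the P-state MSR is only read while the cached value is still invalid, and an out-of-range index (the family 11h erratum 311 case) is treated as P-state 0 rather than being cached. A minimal standalone sketch of that decision follows, with the MSR read replaced by a plain parameter; the function and parameter names (clamp_pstate, hw_index, numps) are illustrative, not the driver's.

/* Return the P-state to cache for a raw hardware P-state index. */
enum pstate {
	HW_PSTATE_INVALID = 0xff,	/* "not read from hardware yet" */
	HW_PSTATE_0	  = 0,
};

static enum pstate clamp_pstate(unsigned int hw_index, unsigned int numps)
{
	/* erratum 311: a core in P-state 0 may report an out-of-range index */
	if (hw_index >= numps)
		return HW_PSTATE_0;
	return (enum pstate)hw_index;
}

Caching the clamped value means later calls can return the stored currpstate without re-reading the MSR, which is exactly what the HW_PSTATE_INVALID guard added in powernowk8_cpu_init() enables.
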
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 1fec0f9b1508..9043251210fb 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -3755,7 +3755,9 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 void __init setup_ioapic_dest(void)
 {
 	int pin, ioapic, irq, irq_entry;
+	struct irq_desc *desc;
 	struct irq_cfg *cfg;
+	cpumask_t mask;
 
 	if (skip_ioapic_setup == 1)
 		return;
@@ -3772,16 +3774,30 @@ void __init setup_ioapic_dest(void)
 		 * cpu is online.
 		 */
 		cfg = irq_cfg(irq);
-		if (!cfg->vector)
+		if (!cfg->vector) {
 			setup_IO_APIC_irq(ioapic, pin, irq,
 					  irq_trigger(irq_entry),
 					  irq_polarity(irq_entry));
+			continue;
+
+		}
+
+		/*
+		 * Honour affinities which have been set in early boot
+		 */
+		desc = irq_to_desc(irq);
+		if (desc->status &
+		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
+			mask = desc->affinity;
+		else
+			mask = TARGET_CPUS;
+
 #ifdef CONFIG_INTR_REMAP
-		else if (intr_remapping_enabled)
-			set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
-#endif
+		if (intr_remapping_enabled)
+			set_ir_ioapic_affinity_irq(irq, mask);
 		else
-			set_ioapic_affinity_irq(irq, TARGET_CPUS);
+#endif
+			set_ioapic_affinity_irq(irq, mask);
 	}
 
 }
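The io_apic.c hunk boils down to one decision per IRQ: if early boot already pinned the interrupt (IRQ_NO_BALANCING or IRQ_AFFINITY_SET in desc->status), reuse desc->affinity; otherwise fall back to TARGET_CPUS, and hand the chosen mask to whichever affinity setter applies. A simplified standalone sketch of that selection follows; the flag bit values, the toy_cpumask_t typedef, TOY_TARGET_CPUS and the helper name are stand-ins, not the kernel definitions.

#define IRQ_NO_BALANCING	(1u << 0)	/* illustrative bit values */
#define IRQ_AFFINITY_SET	(1u << 1)

typedef unsigned long toy_cpumask_t;		/* stands in for cpumask_t */
#define TOY_TARGET_CPUS	((toy_cpumask_t)~0UL)	/* "all CPUs" default */

struct toy_irq_desc {
	unsigned int status;
	toy_cpumask_t affinity;
};

/* Pick the destination mask the way setup_ioapic_dest() now does. */
static toy_cpumask_t ioapic_dest_mask(const struct toy_irq_desc *desc)
{
	if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
		return desc->affinity;		/* honour the early-boot setting */
	return TOY_TARGET_CPUS;			/* otherwise use the default */
}

Keeping the mask selection separate from the #ifdef CONFIG_INTR_REMAP dispatch is what lets the patch pass the same mask to both set_ir_ioapic_affinity_irq() and set_ioapic_affinity_irq().
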
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1c9cc431ea4f..e169ae9b6a62 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void __devinit kvm_setup_secondary_clock(void)
+static void __cpuinit kvm_setup_secondary_clock(void)
 {
 	/*
 	 * Now that the first cpu already had this clocksource initialized,