Diffstat (limited to 'arch/x86')

 arch/x86/include/asm/amd_iommu_types.h | 24
 arch/x86/include/asm/dma-mapping.h     |  6
 arch/x86/include/asm/topology.h        |  2
 arch/x86/kernel/amd_iommu.c            | 49
 arch/x86/kernel/mpparse.c              |  3
 arch/x86/kernel/paravirt-spinlocks.c   |  3
 arch/x86/kernel/pci-gart_64.c          |  2
 arch/x86/oprofile/nmi_int.c            |  5
 arch/x86/oprofile/op_model_ppro.c      |  4
 9 files changed, 51 insertions(+), 47 deletions(-)
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 1a30c0440c6b..ac302a2fa339 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -251,13 +251,6 @@ struct amd_iommu {
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;

-	/*
-	 * Capability pointer. There could be more than one IOMMU per PCI
-	 * device function if there are more than one AMD IOMMU capability
-	 * pointers.
-	 */
-	u16 cap_ptr;
-
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
@@ -266,6 +259,13 @@ struct amd_iommu {
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;

+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
+
 	/* pci domain of this IOMMU */
 	u16 pci_seg;

@@ -284,19 +284,19 @@ struct amd_iommu {
 	/* size of command buffer */
 	u32 cmd_buf_size;

-	/* event buffer virtual address */
-	u8 *evt_buf;
 	/* size of event buffer */
 	u32 evt_buf_size;
+	/* event buffer virtual address */
+	u8 *evt_buf;
 	/* MSI number for event interrupt */
 	u16 evt_msi_num;

-	/* if one, we need to send a completion wait command */
-	int need_sync;
-
 	/* true if interrupts for this IOMMU are already enabled */
 	bool int_enabled;

+	/* if one, we need to send a completion wait command */
+	int need_sync;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
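
The three hunks above are purely a reordering of struct amd_iommu: cap_ptr moves down next to the cap field it describes, evt_buf_size now precedes evt_buf, and need_sync moves below int_enabled. No member is added, removed, or resized, so there is no behavioural change; a sketch of the affected region as it reads after the patch (abridged, assembled from the hunks above):

	struct amd_iommu {
		...
		/* capabilities of that IOMMU read from ACPI */
		u32 cap;

		/* capability pointer (comment as in the hunk above) */
		u16 cap_ptr;

		/* pci domain of this IOMMU */
		u16 pci_seg;
		...
		/* true if interrupts for this IOMMU are already enabled */
		bool int_enabled;

		/* if one, we need to send a completion wait command */
		int need_sync;

		/* default dma_ops domain for that IOMMU */
		struct dma_ops_domain *default_dom;
	};
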
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 7f225a4b2a26..097794ff6b79 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#ifdef CONFIG_X86_32
-	return 0;
-#else
+#ifdef CONFIG_X86_64
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);

-	return (dma_addr == bad_dma_address);
 #endif
+	return (dma_addr == bad_dma_address);
 }

 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
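
Before this change the 32-bit build short-circuited to return 0, i.e. it claimed a DMA mapping could never fail. Flipping the guard to CONFIG_X86_64 lets both builds fall through to the bad_dma_address comparison, with 64-bit additionally consulting the per-device ops. A typical caller, for illustration (standard DMA-API usage of this era, not part of the patch):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		/* the mapping never existed; there is nothing to unmap */
		return -ENOMEM;
	}
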
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4850e4b02b61..ff386ff50ed7 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -239,7 +239,7 @@ struct pci_bus;
 void set_pci_bus_resources_arch_default(struct pci_bus *b);

 #ifdef CONFIG_SMP
-#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
+#define mc_capable() (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
 #define smt_capable() (smp_num_siblings > 1)
 #endif

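
mc_capable() now asks whether CPU 0's core-sibling map covers every possible CPU, rather than trusting boot_cpu_data.x86_max_cores. A worked example on a hypothetical topology, for illustration only:

	/*
	 * Two packages, four cores each, no SMT -> 8 CPUs:
	 *   cpus_weight(per_cpu(cpu_core_map, 0)) == 4, nr_cpu_ids == 8
	 *   mc_capable() == (4 != 8) == true   -> multi-core scheduling pays off
	 *
	 * One package whose cores are all the CPUs there are:
	 *   weight == nr_cpu_ids               -> mc_capable() == false
	 */
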
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e4899e0e8787..a7b6dec6fc3f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)

 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);

 	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);

+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);

 	if (ret)
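
These two hunks move every need_sync access under iommu->lock: queueing a command now sets the flag inside the same critical section, and the completion wait tests and clears it without dropping the lock in between. Previously the flag was cleared before the lock was taken, so a command queued in that window could lose its sync. The resulting protocol, abridged from the patched functions:

	/* producer side, under iommu->lock */
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = 1;		/* a completion wait is now pending */

	/* consumer side, under the same lock, so no command can slip in between */
	if (!iommu->need_sync)
		goto out;			/* nothing queued since the last sync */
	iommu->need_sync = 0;
	ret = __iommu_queue_command(iommu, &cmd);	/* the COMPL_WAIT itself */
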
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)

 	ret = iommu_queue_command(iommu, &cmd);

-	iommu->need_sync = 1;
-
 	return ret;
 }

@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,

 	ret = iommu_queue_command(iommu, &cmd);

-	iommu->need_sync = 1;
-
 	return ret;
 }

@@ -343,7 +344,7 @@ static int iommu_map(struct protection_domain *dom,
 	u64 __pte, *pte, *page;

 	bus_addr = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);

 	/* only support 512GB address spaces for now */
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -599,7 +600,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 			continue;

 		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
 			if (!IOMMU_PTE_PRESENT(p2[j]))
 				continue;
 			p3 = IOMMU_PTE_PAGE(p2[j]);
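
A one-character fix for a copy-paste bug: the inner loop stepped the outer counter, so j never advanced past 0 while i ran away, and the second-level tables were never walked correctly. The intended shape of the walk:

	for (i = 0; i < 512; ++i) {		/* top-level entries */
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;
		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {	/* was '++i' */
			/* ... walk and free the next level ... */
		}
	}
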
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }

 /*****************************************************************************
@@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
 		print_devid(_bdf, 1);
 	}

+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
 	return 1;
 }

@@ -908,7 +910,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (address >= dom->aperture_size)
 		return;

-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);

 	pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
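
Two corrections in one WARN_ON: the alignment test uses ~PAGE_MASK instead of a hard-coded 0xfff, and the upper bound becomes >= so it agrees with the early return two lines above (aperture_size itself is already out of range). For reference, the standard definition makes the alignment test self-explanatory:

	/* PAGE_MASK == ~(PAGE_SIZE - 1); with 4 KiB pages ~PAGE_MASK == 0xfff,
	 * so (address & ~PAGE_MASK) != 0  <=>  address is not page-aligned.
	 * The old literal 0xfffULL silently assumed 4 KiB pages. */
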
@@ -920,8 +922,8 @@

 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -981,7 +983,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;

-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
 		return;

 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -1031,8 +1034,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1062,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,

 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1127,8 +1128,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		goto unmap;
 	}

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1173,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1225,8 +1224,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1255,7 @@ static void free_coherent(struct device *dev, size_t size,

 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);

 	spin_unlock_irqrestore(&domain->lock, flags);

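
All six dma_ops entry points above drop the `if (unlikely(iommu->need_sync))` guard. Since iommu_completion_wait() now performs exactly that test itself, under iommu->lock, callers can invoke it unconditionally without racing against a queue or clear on another CPU. The caller-side pattern reduces to the following (abridged; the __map_single argument list is shortened for illustration):

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, ...);
	if (addr != bad_dma_address)
		iommu_completion_wait(iommu);	/* no-op unless a command was queued */
	spin_unlock_irqrestore(&domain->lock, flags);
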
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index f98f4e1dba09..0f4c1fd5a1f4 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
 		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
 			"configuration information\n");

+	if (!mpf)
+		return;
+
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
 		mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
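
The added check matters on systems where ACPI supplied the processor configuration (the message printed just above): in that case no MP floating pointer structure was ever found, mpf is NULL, and the printk below it would oops on the dereference. After the patch the tail of __get_smp_config() reads:

	if (!mpf)
		return;		/* ACPI-only setup: no MP table to report */

	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
	       mpf->mpf_specification);
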
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 0e9f1982b1dd..95777b0faa73 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -7,7 +7,8 @@

 #include <asm/paravirt.h>

-static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static inline void
+default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
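
For context: default_spin_lock_flags() is the fallback installed into pv_lock_ops, and the callback slot there is declared with the raw_spinlock_t typedef, which the new signature now matches. A sketch of the surrounding wiring, reconstructed from memory of the same-era code, so treat the details as illustrative:

	/* in paravirt.h (assumed declaration) */
	struct pv_lock_ops {
		/* ... */
		void (*spin_lock_flags)(raw_spinlock_t *lock, unsigned long flags);
		/* ... */
	};

	/* later in paravirt-spinlocks.c */
	struct pv_lock_ops pv_lock_ops = {
		/* ... */
		.spin_lock_flags = default_spin_lock_flags,
		/* ... */
	};
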
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a42b02b4df68..ba7ad83e20a8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)

 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	iommu_area_free(iommu_gart_bitmap, offset, size);
+	if (offset >= next_bit)
+		next_bit = offset + size;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }

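
next_bit is where the allocator resumes its bitmap scan, and the GART IO/TLB is flushed when that scan wraps around. Bumping next_bit past a just-freed range therefore keeps the freed GART entries out of circulation until a wraparound, and thus a flush, has happened; the apparent intent (an inference from the surrounding allocator, not stated in the hunk) is to prevent a freed-and-immediately-reused entry from being reachable through a stale IO/TLB translation:

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;	/* don't hand this range out again
						 * before the next wraparound flush */
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
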
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 022cd41ea9b4..202864ad49a7 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -401,14 +401,13 @@ static int __init ppro_init(char **cpu_type)
 		*cpu_type = "i386/pii";
 		break;
 	case 6 ... 8:
+	case 10 ... 11:
 		*cpu_type = "i386/piii";
 		break;
 	case 9:
+	case 13:
 		*cpu_type = "i386/p6_mobile";
 		break;
-	case 10 ... 13:
-		*cpu_type = "i386/p6";
-		break;
 	case 14:
 		*cpu_type = "i386/core";
 		break;
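
Net effect: the catch-all "i386/p6" bucket for models 10-13 is gone. Models 10-11 are now reported as PIII-class, model 13 (Pentium M) joins model 9 as p6_mobile, and model 12 falls through to the switch's default case. In summary:

	/* P6-family model -> cpu_type after this patch
	 *   6 ...  8 : "i386/piii"
	 *  10 ... 11 : "i386/piii"       (was "i386/p6")
	 *   9, 13    : "i386/p6_mobile"  (13 was "i386/p6")
	 *  12        : default case      (was "i386/p6")
	 *  14        : "i386/core"
	 */
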
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 716d26f0e5d4..e9f80c744cf3 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -156,6 +156,8 @@ static void ppro_start(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;

+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
@@ -171,6 +173,8 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	unsigned int low, high;
 	int i;

+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
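
Both hunks add the same guard: bail out while reset_value is still NULL, i.e. when ppro_start()/ppro_stop() is reached before the counter-setup path in this driver has allocated it, or after that allocation failed. Without the guard, the failure mode is immediate:

	/* reset_value == NULL  ->  reset_value[i] in the loop below reads
	 * through a NULL pointer and oopses on the first start/stop that
	 * arrives before (or despite a failed) setup. */
	if (!reset_value)
		return;
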
