author     Joerg Roedel <joerg.roedel@amd.com>    2008-12-03 06:19:27 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>    2008-12-03 06:20:46 -0500
commit     09ee17eb8ea89514c13980c4010bdbbaea8630c2 (patch)
tree       2ca56f35c476ded55324b5bc11f050c7131980dd
parent     f91ba190648be4ff127d6aaf3993ac19d66dc2c2 (diff)
AMD IOMMU: fix possible race while accessing iommu->need_sync
The access to the iommu->need_sync member needs to be protected by the
iommu->lock; otherwise a race condition is possible. Fix it with this
patch.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 33 +++++++++++++--------------------
1 file changed, 13 insertions(+), 20 deletions(-)
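The race the message refers to is easiest to see as a lost update on the flag: before this patch, need_sync was set only after iommu->lock had been dropped, so another CPU could observe a freshly queued command while the flag still read 0 and skip the completion wait. Below is a minimal userspace sketch of the discipline the patch adopts, with hypothetical names (fake_iommu, queue_command, completion_wait) and a pthread mutex standing in for iommu->lock; it is an illustration, not code from the patch.

```c
/*
 * Sketch only: need_sync is set in the same critical section that queues
 * the command, and tested-and-cleared under the same lock before syncing,
 * so a reader can no longer see a queued command with the flag still 0.
 */
#include <pthread.h>
#include <stdbool.h>

struct fake_iommu {
	pthread_mutex_t lock;		/* plays the role of iommu->lock */
	bool need_sync;			/* plays the role of iommu->need_sync */
};

/* Queue a command and mark the IOMMU as needing a sync, atomically with
 * respect to completion_wait() below. */
static int queue_command(struct fake_iommu *iommu)
{
	int ret;

	pthread_mutex_lock(&iommu->lock);
	ret = 0;			/* stand-in for __iommu_queue_command() */
	if (!ret)
		iommu->need_sync = true;
	pthread_mutex_unlock(&iommu->lock);

	return ret;
}

/* Sync only if something was queued since the last sync; the test and the
 * clear both happen under the lock, closing the pre-patch race window. */
static void completion_wait(struct fake_iommu *iommu)
{
	pthread_mutex_lock(&iommu->lock);
	if (iommu->need_sync) {
		iommu->need_sync = false;
		/* ... queue a COMPLETION_WAIT command and poll for it ... */
	}
	pthread_mutex_unlock(&iommu->lock);
}

int main(void)
{
	struct fake_iommu iommu = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.need_sync = false,
	};

	queue_command(&iommu);
	completion_wait(&iommu);	/* callers can sync unconditionally */

	return 0;
}
```

With the flag maintained entirely under the lock, the DMA-API paths in the patch below simply call iommu_completion_wait() unconditionally; the cheap "nothing to sync" early-out now happens inside the locked section instead of at the call sites.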
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a232e5a85d48..5662e226b0c9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -1034,8 +1033,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1063,8 +1061,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1130,8 +1127,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1176,8 +1172,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1228,8 +1223,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1260,8 +1254,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
