diff options
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 41 |
1 file changed, 27 insertions, 14 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 22d7d050905d..042fdc27bc92 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -65,7 +65,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | |||
65 | u8 *target; | 65 | u8 *target; |
66 | 66 | ||
67 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | 67 | tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); |
68 | target = (iommu->cmd_buf + tail); | 68 | target = iommu->cmd_buf + tail; |
69 | memcpy_toio(target, cmd, sizeof(*cmd)); | 69 | memcpy_toio(target, cmd, sizeof(*cmd)); |
70 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; | 70 | tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; |
71 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | 71 | head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); |
@@ -101,32 +101,39 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | |||
101 | */ | 101 | */ |
102 | static int iommu_completion_wait(struct amd_iommu *iommu) | 102 | static int iommu_completion_wait(struct amd_iommu *iommu) |
103 | { | 103 | { |
104 | int ret; | 104 | int ret = 0, ready = 0; |
105 | unsigned status = 0; | ||
105 | struct iommu_cmd cmd; | 106 | struct iommu_cmd cmd; |
106 | volatile u64 ready = 0; | 107 | unsigned long flags, i = 0; |
107 | unsigned long ready_phys = virt_to_phys(&ready); | ||
108 | unsigned long i = 0; | ||
109 | 108 | ||
110 | memset(&cmd, 0, sizeof(cmd)); | 109 | memset(&cmd, 0, sizeof(cmd)); |
111 | cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK; | 110 | cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; |
112 | cmd.data[1] = upper_32_bits(ready_phys); | ||
113 | cmd.data[2] = 1; /* value written to 'ready' */ | ||
114 | CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); | 111 | CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); |
115 | 112 | ||
116 | iommu->need_sync = 0; | 113 | iommu->need_sync = 0; |
117 | 114 | ||
118 | ret = iommu_queue_command(iommu, &cmd); | 115 | spin_lock_irqsave(&iommu->lock, flags); |
116 | |||
117 | ret = __iommu_queue_command(iommu, &cmd); | ||
119 | 118 | ||
120 | if (ret) | 119 | if (ret) |
121 | return ret; | 120 | goto out; |
122 | 121 | ||
123 | while (!ready && (i < EXIT_LOOP_COUNT)) { | 122 | while (!ready && (i < EXIT_LOOP_COUNT)) { |
124 | ++i; | 123 | ++i; |
125 | cpu_relax(); | 124 | /* wait for the bit to become one */ |
125 | status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
126 | ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; | ||
126 | } | 127 | } |
127 | 128 | ||
129 | /* set bit back to zero */ | ||
130 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | ||
131 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | ||
132 | |||
128 | if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) | 133 | if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) |
129 | printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); | 134 | printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); |
135 | out: | ||
136 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
130 | 137 | ||
131 | return 0; | 138 | return 0; |
132 | } | 139 | } |
@@ -137,6 +144,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu) | |||
137 | static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) | 144 | static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) |
138 | { | 145 | { |
139 | struct iommu_cmd cmd; | 146 | struct iommu_cmd cmd; |
147 | int ret; | ||
140 | 148 | ||
141 | BUG_ON(iommu == NULL); | 149 | BUG_ON(iommu == NULL); |
142 | 150 | ||
@@ -144,9 +152,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) | |||
144 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); | 152 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); |
145 | cmd.data[0] = devid; | 153 | cmd.data[0] = devid; |
146 | 154 | ||
155 | ret = iommu_queue_command(iommu, &cmd); | ||
156 | |||
147 | iommu->need_sync = 1; | 157 | iommu->need_sync = 1; |
148 | 158 | ||
149 | return iommu_queue_command(iommu, &cmd); | 159 | return ret; |
150 | } | 160 | } |
151 | 161 | ||
152 | /* | 162 | /* |
@@ -156,21 +166,24 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | |||
156 | u64 address, u16 domid, int pde, int s) | 166 | u64 address, u16 domid, int pde, int s) |
157 | { | 167 | { |
158 | struct iommu_cmd cmd; | 168 | struct iommu_cmd cmd; |
169 | int ret; | ||
159 | 170 | ||
160 | memset(&cmd, 0, sizeof(cmd)); | 171 | memset(&cmd, 0, sizeof(cmd)); |
161 | address &= PAGE_MASK; | 172 | address &= PAGE_MASK; |
162 | CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES); | 173 | CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES); |
163 | cmd.data[1] |= domid; | 174 | cmd.data[1] |= domid; |
164 | cmd.data[2] = LOW_U32(address); | 175 | cmd.data[2] = lower_32_bits(address); |
165 | cmd.data[3] = upper_32_bits(address); | 176 | cmd.data[3] = upper_32_bits(address); |
166 | if (s) /* size bit - we flush more than one 4kb page */ | 177 | if (s) /* size bit - we flush more than one 4kb page */ |
167 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; | 178 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; |
168 | if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ | 179 | if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ |
169 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; | 180 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; |
170 | 181 | ||
182 | ret = iommu_queue_command(iommu, &cmd); | ||
183 | |||
171 | iommu->need_sync = 1; | 184 | iommu->need_sync = 1; |
172 | 185 | ||
173 | return iommu_queue_command(iommu, &cmd); | 186 | return ret; |
174 | } | 187 | } |
175 | 188 | ||
176 | /* | 189 | /* |