-rw-r--r--  drivers/dma/ioat/dca.c    | 200 -
-rw-r--r--  drivers/dma/ioat/dma.c    |   1 -
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 222 -
-rw-r--r--  drivers/dma/ioat/dma_v2.h |   2 -
-rw-r--r--  drivers/dma/ioat/pci.c    |   4 +-
5 files changed, 1 insertion, 428 deletions
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index b1cffd2429e4..540d94ce9736 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -118,30 +118,6 @@ struct ioat_dca_priv {
 	struct ioat_dca_slot	 req_slots[0];
 };
 
-static u8 ioat_dca_get_tag(struct dca_provider *dca,
-			   struct device *dev,
-			   int cpu)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	int i, apic_id, bit, value;
-	u8 entry, tag;
-
-	tag = 0;
-	apic_id = cpu_physical_id(cpu);
-
-	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
-		entry = ioatdca->tag_map[i];
-		if (entry & DCA_TAG_MAP_VALID) {
-			bit = entry & ~DCA_TAG_MAP_VALID;
-			value = (apic_id & (1 << bit)) ? 1 : 0;
-		} else {
-			value = entry ? 1 : 0;
-		}
-		tag |= (value << i);
-	}
-	return tag;
-}
-
 static int ioat_dca_dev_managed(struct dca_provider *dca,
 				struct device *dev)
 {
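
For context on the hunk above: the removed ioat_dca_get_tag() assembled the DCA tag one bit at a time, each tag-map entry either selecting a bit of the CPU's local APIC ID (when its VALID flag is set) or contributing a constant. A self-contained sketch of that selection, assuming DCA_TAG_MAP_VALID = 0x80 and IOAT_TAG_MAP_LEN = 8 from the driver's private definitions (not visible in this diff):

#include <stdint.h>

#define DCA_TAG_MAP_VALID 0x80	/* assumed: marks an APIC-ID bit selector */
#define IOAT_TAG_MAP_LEN  8	/* assumed length of the tag map */

/* Each map entry yields one tag bit: either the APIC-ID bit indexed by
 * the low bits of the entry, or a literal 0/1 when VALID is clear. */
static uint8_t dca_tag_for_apic_id(const uint8_t tag_map[IOAT_TAG_MAP_LEN],
				   uint32_t apic_id)
{
	uint8_t tag = 0;
	int i;

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		uint8_t entry = tag_map[i];
		int value;

		if (entry & DCA_TAG_MAP_VALID)
			value = (apic_id >> (entry & ~DCA_TAG_MAP_VALID)) & 1;
		else
			value = entry ? 1 : 0;
		tag |= value << i;
	}
	return tag;	/* the v2 wrapper then returned (~tag) & 0x1F */
}
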
@@ -157,182 +133,6 @@ static int ioat_dca_dev_managed(struct dca_provider *dca,
 	return 0;
 }
 
-static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 id;
-	u16 global_req_table;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-	id = dcaid_from_pcidev(pdev);
-
-	if (ioatdca->requester_count == ioatdca->max_requesters)
-		return -ENODEV;
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == NULL) {
-			/* found an empty slot */
-			ioatdca->requester_count++;
-			ioatdca->req_slots[i].pdev = pdev;
-			ioatdca->req_slots[i].rid = id;
-			global_req_table =
-				readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-			writel(id | IOAT_DCA_GREQID_VALID,
-			       ioatdca->iobase + global_req_table + (i * 4));
-			return i;
-		}
-	}
-	/* Error, ioatdma->requester_count is out of whack */
-	return -EFAULT;
-}
-
-static int ioat2_dca_remove_requester(struct dca_provider *dca,
-				      struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 global_req_table;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == pdev) {
-			global_req_table =
-				readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-			writel(0, ioatdca->iobase + global_req_table + (i * 4));
-			ioatdca->req_slots[i].pdev = NULL;
-			ioatdca->req_slots[i].rid = 0;
-			ioatdca->requester_count--;
-			return i;
-		}
-	}
-	return -ENODEV;
-}
-
-static u8 ioat2_dca_get_tag(struct dca_provider *dca,
-			    struct device *dev,
-			    int cpu)
-{
-	u8 tag;
-
-	tag = ioat_dca_get_tag(dca, dev, cpu);
-	tag = (~tag) & 0x1F;
-	return tag;
-}
-
-static struct dca_ops ioat2_dca_ops = {
-	.add_requester = ioat2_dca_add_requester,
-	.remove_requester = ioat2_dca_remove_requester,
-	.get_tag = ioat2_dca_get_tag,
-	.dev_managed = ioat_dca_dev_managed,
-};
-
-static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
-{
-	int slots = 0;
-	u32 req;
-	u16 global_req_table;
-
-	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
-	if (global_req_table == 0)
-		return 0;
-	do {
-		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
-		slots++;
-	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);
-
-	return slots;
-}
-
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct dca_provider *dca;
-	struct ioat_dca_priv *ioatdca;
-	int slots;
-	int i;
-	int err;
-	u32 tag_map;
-	u16 dca_offset;
-	u16 csi_fsb_control;
-	u16 pcie_control;
-	u8 bit;
-
-	if (!system_has_dca_enabled(pdev))
-		return NULL;
-
-	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
-	if (dca_offset == 0)
-		return NULL;
-
-	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
-	if (slots == 0)
-		return NULL;
-
-	dca = alloc_dca_provider(&ioat2_dca_ops,
-				 sizeof(*ioatdca)
-				 + (sizeof(struct ioat_dca_slot) * slots));
-	if (!dca)
-		return NULL;
-
-	ioatdca = dca_priv(dca);
-	ioatdca->iobase = iobase;
-	ioatdca->dca_base = iobase + dca_offset;
-	ioatdca->max_requesters = slots;
-
-	/* some bios might not know to turn these on */
-	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
-		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
-		writew(csi_fsb_control,
-		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-	}
-	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
-		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
-		writew(pcie_control,
-		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-	}
-
-
-	/* TODO version, compatibility and configuration checks */
-
-	/* copy out the APIC to DCA tag map */
-	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
-	for (i = 0; i < 5; i++) {
-		bit = (tag_map >> (4 * i)) & 0x0f;
-		if (bit < 8)
-			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
-		else
-			ioatdca->tag_map[i] = 0;
-	}
-
-	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
-		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
-				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
-				dev_driver_string(&pdev->dev),
-				dev_name(&pdev->dev));
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	err = register_dca_provider(dca, &pdev->dev);
-	if (err) {
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	return dca;
-}
-
 static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
 {
 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
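
The removed v2 provider also shows how the hardware publishes its requester capacity: ioat2_dca_count_dca_slots() walks the global requester table until it reads an entry with the LASTID flag set. A sketch of that probe loop, with a plain array standing in for the readl() MMIO accesses and IOAT_DCA_GREQID_LASTID assumed to be the top bit of each 32-bit entry:

#include <stdint.h>

#define IOAT_DCA_GREQID_LASTID 0x80000000u	/* assumed: marks the final slot */

/* 'table' stands in for the MMIO global requester table; the driver
 * read each entry with readl(iobase + global_req_table + slot * 4). */
static int count_dca_slots(const uint32_t *table)
{
	int slots = 0;
	uint32_t req;

	do {
		req = table[slots];
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}
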
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index f40768dfc3e6..fe63ff8c1c00 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -31,7 +31,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
 #include "dma.h"
 #include "registers.h"
 #include "hw.h"
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 0fba93c2feb4..2467298843ea 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -31,7 +31,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
 #include "dma.h"
 #include "dma_v2.h"
 #include "registers.h"
@@ -124,76 +123,6 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&ioat->prep_lock);
 }
 
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct dma_async_tx_descriptor *tx;
-	struct ioat_ring_ent *desc;
-	bool seen_current = false;
-	u16 active;
-	int idx = ioat->tail, i;
-
-	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-
-	active = ioat2_ring_active(ioat);
-	for (i = 0; i < active && !seen_current; i++) {
-		smp_read_barrier_depends();
-		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		tx = &desc->txd;
-		dump_desc_dbg(ioat, desc);
-		if (tx->cookie) {
-			dma_descriptor_unmap(tx);
-			dma_cookie_complete(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys == phys_complete)
-			seen_current = true;
-	}
-	smp_mb(); /* finish all descriptor reads before incrementing tail */
-	ioat->tail = idx + i;
-	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
-
-	chan->last_completion = phys_complete;
-	if (active - i == 0) {
-		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
-			__func__);
-		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-}
-
-/**
- * ioat2_cleanup - clean finished descriptors (advance tail pointer)
- * @chan: ioat channel to be cleaned up
- */
-static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-void ioat2_cleanup_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat2_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
 void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
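
The __cleanup() removed above is the v2 instance of the ring-retirement pattern this driver family relies on: walk from tail toward head, completing each descriptor's transaction until reaching the one whose bus address matches the hardware's completion write-back. A simplified sketch of the walk with stand-in types; the real code additionally holds cleanup_lock, issues memory barriers, and BUGs if no active descriptor matched:

#include <stdbool.h>
#include <stdint.h>

struct ring_ent {
	uint64_t phys;			/* descriptor bus address */
	void (*callback)(void *arg);	/* completion callback, may be NULL */
	void *callback_param;
};

struct desc_ring {
	struct ring_ent *ents;
	unsigned int size;		/* power of two */
	unsigned int head, tail;	/* free-running counters */
};

/* Retire entries from tail up to and including the completed descriptor. */
static void ring_cleanup(struct desc_ring *r, uint64_t phys_complete)
{
	unsigned int active = r->head - r->tail;
	unsigned int i;
	bool seen_current = false;

	for (i = 0; i < active && !seen_current; i++) {
		struct ring_ent *e = &r->ents[(r->tail + i) & (r->size - 1)];

		if (e->callback) {
			e->callback(e->callback_param);
			e->callback = NULL;
		}
		if (e->phys == phys_complete)
			seen_current = true;
	}
	r->tail += i;	/* everything before this point is finished */
}
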
@@ -256,110 +185,6 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
 	return err;
 }
 
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	ioat2_quiesce(chan, 0);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	__ioat2_restart_chan(ioat);
-}
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	if (ioat2_ring_active(ioat)) {
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		return;
-	}
-
-	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	else if (ioat->alloc_order > ioat_get_alloc_order()) {
-		/* if the ring is idle, empty, and oversized try to step
-		 * down the size
-		 */
-		reshape_ring(ioat, ioat->alloc_order - 1);
-
-		/* keep shrinking until we get back to our minimum
-		 * default size
-		 */
-		if (ioat->alloc_order > ioat_get_alloc_order())
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-
-}
-
-void ioat2_timer_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-	u64 status;
-
-	status = ioat_chansts(chan);
-
-	/* when halted due to errors check for channel
-	 * programming errors before advancing the completion state
-	 */
-	if (is_ioat_halted(status)) {
-		u32 chanerr;
-
-		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-			__func__, chanerr);
-		if (test_bit(IOAT_RUN, &chan->state))
-			BUG_ON(is_ioat_bug(chanerr));
-		else /* we never got off the ground */
-			return;
-	}
-
-	/* if we haven't made progress and we have already
-	 * acknowledged a pending completion once, then be more
-	 * forceful with a restart
-	 */
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-		spin_lock_bh(&ioat->prep_lock);
-		ioat2_restart_channel(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	} else {
-		set_bit(IOAT_COMPLETION_ACK, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
-
-
-	if (ioat2_ring_active(ioat))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat->prep_lock);
-		check_active(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static int ioat2_reset_hw(struct ioat_chan_common *chan)
-{
-	/* throw away whatever the channel was doing and get it initialized */
-	u32 chanerr;
-
-	ioat2_quiesce(chan, msecs_to_jiffies(100));
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
-	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
-}
-
 /**
  * ioat2_enumerate_channels - find and initialize the device's channels
  * @device: the device to be enumerated
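
Note the two-phase restart policy in the removed ioat2_timer_event(): a stalled channel is first only marked with IOAT_COMPLETION_ACK and the timer re-armed; a restart happens only if the next timeout still finds no completed work. A condensed sketch of that escalation, with a boolean standing in for the driver's state bit and 'made_progress' standing in for ioat_cleanup_preamble() returning a new completion address:

#include <stdbool.h>

enum wd_action { WD_REARM, WD_RESTART };

/* First stalled tick: remember it and re-arm.  Second consecutive
 * stalled tick: escalate to a channel restart.  (In the driver the
 * ack bit is cleared elsewhere once work flows again.) */
static enum wd_action watchdog_tick(bool *completion_acked, bool made_progress)
{
	if (made_progress) {
		*completion_acked = false;
		return WD_REARM;
	}
	if (*completion_acked)
		return WD_RESTART;
	*completion_acked = true;
	return WD_REARM;
}
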
@@ -386,11 +211,6 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 		return 0;
 	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
 
-	/* FIXME which i/oat version is i7300? */
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
 	for (i = 0; i < dma->chancnt; i++) {
 		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
 		if (!ioat)
@@ -872,45 +692,3 @@ struct kobj_type ioat2_ktype = {
 	.sysfs_ops = &ioat_sysfs_ops,
 	.default_attrs = ioat2_attrs,
 };
-
-int ioat2_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	int err;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	device->reset_hw = ioat2_reset_hw;
-	device->cleanup_fn = ioat2_cleanup_event;
-	device->timer_fn = ioat2_timer_event;
-	device->self_test = ioat_dma_self_test;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_tx_status = ioat_dma_tx_status;
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-
-	ioat_kobject_add(device, &ioat2_ktype);
-
-	if (dca)
-		device->dca = ioat2_dca_init(pdev, device->reg_base);
-
-	return err;
-}
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index bf24ebe874b0..b7d35839e68b 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -153,7 +153,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
 
 int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
 int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
 int ioat2_enumerate_channels(struct ioatdma_device *device);
@@ -166,7 +165,6 @@ void ioat2_free_chan_resources(struct dma_chan *c);
 void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
 bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_event(unsigned long data);
 void ioat2_timer_event(unsigned long data);
 int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
 int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 130db77120aa..b979a667f501 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -210,9 +210,7 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_drvdata(pdev, device);
 
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	if (device->version == IOAT_VER_2_0)
-		err = ioat2_dma_probe(device, ioat_dca_enabled);
-	else if (device->version >= IOAT_VER_3_0)
+	if (device->version >= IOAT_VER_3_0)
 		err = ioat3_dma_probe(device, ioat_dca_enabled);
 	else
 		return -ENODEV;
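
The pci.c hunk above is the user-visible effect of the patch: with ioat2_dma_probe() gone, probe dispatch collapses to a single version gate and v2 (and older) engines are no longer bound. The '>=' comparison works because the version register packs major.minor into two nibbles, so generations order numerically. A small decoding sketch; the macro values (IOAT_VER_2_0 == 0x20, IOAT_VER_3_0 == 0x30) are assumptions taken from the driver's registers.h:

#include <stdint.h>
#include <stdio.h>

#define IOAT_VER_2_0 0x20	/* assumed from registers.h */
#define IOAT_VER_3_0 0x30

/* Version byte: major in the high nibble, minor in the low nibble. */
static void decode_ioat_version(uint8_t ver)
{
	printf("IOAT v%u.%u -> %s\n", ver >> 4, ver & 0xf,
	       ver >= IOAT_VER_3_0 ? "ioat3_dma_probe()" : "-ENODEV");
}

int main(void)
{
	decode_ioat_version(IOAT_VER_2_0);	/* now rejected by this patch */
	decode_ioat_version(IOAT_VER_3_0);
	return 0;
}
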