-rw-r--r-- drivers/dma/ioat/dca.c | 151
-rw-r--r-- drivers/dma/ioat/dma.c | 674
-rw-r--r-- drivers/dma/ioat/dma.h |  65
-rw-r--r-- drivers/dma/ioat/pci.c |   4
4 files changed, 2 insertions, 892 deletions
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index ea1e107ae884..b1cffd2429e4 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -71,14 +71,6 @@ static inline int dca2_tag_map_valid(u8 *tag_map)
 #define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x))
 #define IOAT_TAG_MAP_LEN 8

-static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
-static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
-
 /* pack PCI B/D/F into a u16 */
 static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
 {
@@ -126,72 +118,6 @@ struct ioat_dca_priv {
 	struct ioat_dca_slot req_slots[0];
 };

-/* 5000 series chipset DCA Port Requester ID Table Entry Format
- * [15:8] PCI-Express Bus Number
- * [7:3] PCI-Express Device Number
- * [2:0] PCI-Express Function Number
- *
- * 5000 series chipset DCA control register format
- * [7:1] Reserved (0)
- * [0] Ignore Function Number
- */
-
-static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 id;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-	id = dcaid_from_pcidev(pdev);
-
-	if (ioatdca->requester_count == ioatdca->max_requesters)
-		return -ENODEV;
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == NULL) {
-			/* found an empty slot */
-			ioatdca->requester_count++;
-			ioatdca->req_slots[i].pdev = pdev;
-			ioatdca->req_slots[i].rid = id;
-			writew(id, ioatdca->dca_base + (i * 4));
-			/* make sure the ignore function bit is off */
-			writeb(0, ioatdca->dca_base + (i * 4) + 2);
-			return i;
-		}
-	}
-	/* Error, ioatdma->requester_count is out of whack */
-	return -EFAULT;
-}
-
-static int ioat_dca_remove_requester(struct dca_provider *dca,
-				     struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == pdev) {
-			writew(0, ioatdca->dca_base + (i * 4));
-			ioatdca->req_slots[i].pdev = NULL;
-			ioatdca->req_slots[i].rid = 0;
-			ioatdca->requester_count--;
-			return i;
-		}
-	}
-	return -ENODEV;
-}
-
 static u8 ioat_dca_get_tag(struct dca_provider *dca,
 			   struct device *dev,
 			   int cpu)
@@ -231,83 +157,6 @@ static int ioat_dca_dev_managed(struct dca_provider *dca,
 	return 0;
 }

-static struct dca_ops ioat_dca_ops = {
-	.add_requester = ioat_dca_add_requester,
-	.remove_requester = ioat_dca_remove_requester,
-	.get_tag = ioat_dca_get_tag,
-	.dev_managed = ioat_dca_dev_managed,
-};
-
-
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct dca_provider *dca;
-	struct ioat_dca_priv *ioatdca;
-	u8 *tag_map = NULL;
-	int i;
-	int err;
-	u8 version;
-	u8 max_requesters;
-
-	if (!system_has_dca_enabled(pdev))
-		return NULL;
-
-	/* I/OAT v1 systems must have a known tag_map to support DCA */
-	switch (pdev->vendor) {
-	case PCI_VENDOR_ID_INTEL:
-		switch (pdev->device) {
-		case PCI_DEVICE_ID_INTEL_IOAT:
-			tag_map = ioat_tag_map_BNB;
-			break;
-		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
-			tag_map = ioat_tag_map_CNB;
-			break;
-		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
-			tag_map = ioat_tag_map_SCNB;
-			break;
-		}
-		break;
-	case PCI_VENDOR_ID_UNISYS:
-		switch (pdev->device) {
-		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
-			tag_map = ioat_tag_map_UNISYS;
-			break;
-		}
-		break;
-	}
-	if (tag_map == NULL)
-		return NULL;
-
-	version = readb(iobase + IOAT_VER_OFFSET);
-	if (version == IOAT_VER_3_0)
-		max_requesters = IOAT3_DCA_MAX_REQ;
-	else
-		max_requesters = IOAT_DCA_MAX_REQ;
-
-	dca = alloc_dca_provider(&ioat_dca_ops,
-			sizeof(*ioatdca) +
-			(sizeof(struct ioat_dca_slot) * max_requesters));
-	if (!dca)
-		return NULL;
-
-	ioatdca = dca_priv(dca);
-	ioatdca->max_requesters = max_requesters;
-	ioatdca->dca_base = iobase + 0x54;
-
-	/* copy over the APIC ID to DCA tag mapping */
-	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
-		ioatdca->tag_map[i] = tag_map[i];
-
-	err = register_dca_provider(dca, &pdev->dev);
-	if (err) {
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	return dca;
-}
-
-
 static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
 {
 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
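Note: the removed ioat_dca_add_requester() programs the 16-bit requester ID into the slot register at dca_base + (i * 4) and clears the ignore-function byte at offset 2, using the bus/device/function layout given in the removed 5000-series comment. A minimal sketch of that packing, inferred only from that comment (the body of dcaid_from_pcidev() is not part of this hunk, so treat this as illustrative, not the driver's code):

/* Hypothetical helper, not from the patch: pack bus/dev/fn per the
 * removed format comment ([15:8] bus, [7:3] device, [2:0] function). */
static inline u16 example_dca_requester_id(u8 bus, u8 dev, u8 fn)
{
	return (bus << 8) | ((dev & 0x1f) << 3) | (fn & 0x07);
}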
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index ee0aa9f4ccfa..f40768dfc3e6 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
+ * Copyright(c) 2004 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,6 @@ module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
 		 "high-water mark for pushing ioat descriptors (default: 4)");

-/* internal functions */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat);
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
-
 /**
  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
  * @irq: interrupt id
@@ -116,248 +112,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
 	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
 }

-/**
- * ioat1_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat1_enumerate_channels(struct ioatdma_device *device)
-{
-	u8 xfercap_scale;
-	u32 xfercap;
-	int i;
-	struct ioat_dma_chan *ioat;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
-	}
-	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_scale &= 0x1f; /* bits [4:0] valid */
-	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
-
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
-		if (!ioat)
-			break;
-
-		ioat_init_channel(device, &ioat->base, i);
-		ioat->xfercap = xfercap;
-		spin_lock_init(&ioat->desc_lock);
-		INIT_LIST_HEAD(&ioat->free_desc);
-		INIT_LIST_HEAD(&ioat->used_desc);
-	}
-	dma->chancnt = i;
-	return i;
-}
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- * descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void
-__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
-{
-	void __iomem *reg_base = ioat->base.reg_base;
-
-	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
-		__func__, ioat->pending);
-	ioat->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
-
-	if (ioat->pending > 0) {
-		spin_lock_bh(&ioat->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat);
-		spin_unlock_bh(&ioat->desc_lock);
-	}
-}
-
-/**
- * ioat1_reset_channel - restart a channel
- * @ioat: IOAT DMA channel handle
- */
-static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	void __iomem *reg_base = chan->reg_base;
-	u32 chansts, chanerr;
-
-	dev_warn(to_dev(chan), "reset\n");
-	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
-	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
-	if (chanerr) {
-		dev_err(to_dev(chan),
-			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-			chan_num(chan), chansts, chanerr);
-		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/*
-	 * whack it upside the head with a reset
-	 * and wait for things to settle out.
-	 * force the pending count to a really big negative
-	 * to make sure no one forces an issue_pending
-	 * while we're waiting.
-	 */
-
-	ioat->pending = INT_MIN;
-	writeb(IOAT_CHANCMD_RESET,
-	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	set_bit(IOAT_RESET_PENDING, &chan->state);
-	mod_timer(&chan->timer, jiffies + RESET_DELAY);
-}
-
-static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_chan *c = tx->chan;
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *first;
-	struct ioat_desc_sw *chain_tail;
-	dma_cookie_t cookie;
-
-	spin_lock_bh(&ioat->desc_lock);
-	/* cookie incr and addition to used_list must be atomic */
-	cookie = dma_cookie_assign(tx);
-	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
-
-	/* write address into NextDescriptor field of last desc in chain */
-	first = to_ioat_desc(desc->tx_list.next);
-	chain_tail = to_ioat_desc(ioat->used_desc.prev);
-	/* make descriptor updates globally visible before chaining */
-	wmb();
-	chain_tail->hw->next = first->txd.phys;
-	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
-	dump_desc_dbg(ioat, chain_tail);
-	dump_desc_dbg(ioat, first);
-
-	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	ioat->active += desc->hw->tx_cnt;
-	ioat->pending += desc->hw->tx_cnt;
-	if (ioat->pending >= ioat_pending_level)
-		__ioat1_dma_memcpy_issue_pending(ioat);
-	spin_unlock_bh(&ioat->desc_lock);
-
-	return cookie;
-}
-
-/**
- * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat: the channel supplying the memory pool for the descriptors
- * @flags: allocation flags
- */
-static struct ioat_desc_sw *
-ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
-{
-	struct ioat_dma_descriptor *desc;
-	struct ioat_desc_sw *desc_sw;
-	struct ioatdma_device *ioatdma_device;
-	dma_addr_t phys;
-
-	ioatdma_device = ioat->base.device;
-	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
-	if (unlikely(!desc))
-		return NULL;
-
-	desc_sw = kzalloc(sizeof(*desc_sw), flags);
-	if (unlikely(!desc_sw)) {
-		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
-		return NULL;
-	}
-
-	memset(desc, 0, sizeof(*desc));
-
-	INIT_LIST_HEAD(&desc_sw->tx_list);
-	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
-	desc_sw->txd.tx_submit = ioat1_tx_submit;
-	desc_sw->hw = desc;
-	desc_sw->txd.phys = phys;
-	set_desc_id(desc_sw, -1);
-
-	return desc_sw;
-}
-
-static int ioat_initial_desc_count = 256;
-module_param(ioat_initial_desc_count, int, 0644);
-MODULE_PARM_DESC(ioat_initial_desc_count,
-		 "ioat1: initial descriptors per channel (default: 256)");
-/**
- * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
- * @chan: the channel to be filled out
- */
-static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *desc;
-	u32 chanerr;
-	int i;
-	LIST_HEAD(tmp_list);
-
-	/* have we already been set up? */
-	if (!list_empty(&ioat->free_desc))
-		return ioat->desccount;
-
-	/* Setup register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	if (chanerr) {
-		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
-		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/* Allocate descriptors */
-	for (i = 0; i < ioat_initial_desc_count; i++) {
-		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
-		if (!desc) {
-			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
-			break;
-		}
-		set_desc_id(desc, i);
-		list_add_tail(&desc->node, &tmp_list);
-	}
-	spin_lock_bh(&ioat->desc_lock);
-	ioat->desccount = i;
-	list_splice(&tmp_list, &ioat->free_desc);
-	spin_unlock_bh(&ioat->desc_lock);
-
-	/* allocate a completion writeback area */
-	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	chan->completion = pci_pool_alloc(chan->device->completion_pool,
-					  GFP_KERNEL, &chan->completion_dma);
-	memset(chan->completion, 0, sizeof(*chan->completion));
-	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) chan->completion_dma) >> 32,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	set_bit(IOAT_RUN, &chan->state);
-	ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
-	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
-		__func__, ioat->desccount);
-	return ioat->desccount;
-}
-
 void ioat_stop(struct ioat_chan_common *chan)
 {
 	struct ioatdma_device *device = chan->device;
@@ -394,177 +148,6 @@ void ioat_stop(struct ioat_chan_common *chan)
 	device->cleanup_fn((unsigned long) &chan->common);
 }

-/**
- * ioat1_dma_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-static void ioat1_dma_free_chan_resources(struct dma_chan *c)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *ioatdma_device = chan->device;
-	struct ioat_desc_sw *desc, *_desc;
-	int in_use_descs = 0;
-
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (ioat->desccount == 0)
-		return;
-
-	ioat_stop(chan);
-
-	/* Delay 100ms after reset to allow internal DMA logic to quiesce
-	 * before removing DMA descriptor resources.
-	 */
-	writeb(IOAT_CHANCMD_RESET,
-	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	mdelay(100);
-
-	spin_lock_bh(&ioat->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
-		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
-			__func__, desc_id(desc));
-		dump_desc_dbg(ioat, desc);
-		in_use_descs++;
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->txd.phys);
-		kfree(desc);
-	}
-	list_for_each_entry_safe(desc, _desc,
-				 &ioat->free_desc, node) {
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->txd.phys);
-		kfree(desc);
-	}
-	spin_unlock_bh(&ioat->desc_lock);
-
-	pci_pool_free(ioatdma_device->completion_pool,
-		      chan->completion,
-		      chan->completion_dma);
-
-	/* one is ok since we left it on there on purpose */
-	if (in_use_descs > 1)
-		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
-			in_use_descs - 1);
-
-	chan->last_completion = 0;
-	chan->completion_dma = 0;
-	ioat->pending = 0;
-	ioat->desccount = 0;
-}
-
-/**
- * ioat1_dma_get_next_descriptor - return the next available descriptor
- * @ioat: IOAT DMA channel handle
- *
- * Gets the next descriptor from the chain, and must be called with the
- * channel's desc_lock held.  Allocates more descriptors if the channel
- * has run out.
- */
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
-{
-	struct ioat_desc_sw *new;
-
-	if (!list_empty(&ioat->free_desc)) {
-		new = to_ioat_desc(ioat->free_desc.next);
-		list_del(&new->node);
-	} else {
-		/* try to get another desc */
-		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
-		if (!new) {
-			dev_err(to_dev(&ioat->base), "alloc failed\n");
-			return NULL;
-		}
-	}
-	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
-		__func__, desc_id(new));
-	prefetch(new->hw);
-	return new;
-}
-
-static struct dma_async_tx_descriptor *
-ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
-		      dma_addr_t dma_src, size_t len, unsigned long flags)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_desc_sw *desc;
-	size_t copy;
-	LIST_HEAD(chain);
-	dma_addr_t src = dma_src;
-	dma_addr_t dest = dma_dest;
-	size_t total_len = len;
-	struct ioat_dma_descriptor *hw = NULL;
-	int tx_cnt = 0;
-
-	spin_lock_bh(&ioat->desc_lock);
-	desc = ioat1_dma_get_next_descriptor(ioat);
-	do {
-		if (!desc)
-			break;
-
-		tx_cnt++;
-		copy = min_t(size_t, len, ioat->xfercap);
-
-		hw = desc->hw;
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dest;
-
-		list_add_tail(&desc->node, &chain);
-
-		len -= copy;
-		dest += copy;
-		src += copy;
-		if (len) {
-			struct ioat_desc_sw *next;
-
-			async_tx_ack(&desc->txd);
-			next = ioat1_dma_get_next_descriptor(ioat);
-			hw->next = next ? next->txd.phys : 0;
-			dump_desc_dbg(ioat, desc);
-			desc = next;
-		} else
-			hw->next = 0;
-	} while (len);
-
-	if (!desc) {
-		struct ioat_chan_common *chan = &ioat->base;
-
-		dev_err(to_dev(chan),
-			"chan%d - get_next_desc failed\n", chan_num(chan));
-		list_splice(&chain, &ioat->free_desc);
-		spin_unlock_bh(&ioat->desc_lock);
-		return NULL;
-	}
-	spin_unlock_bh(&ioat->desc_lock);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	list_splice(&chain, &desc->tx_list);
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.compl_write = 1;
-	hw->tx_cnt = tx_cnt;
-	dump_desc_dbg(ioat, desc);
-
-	return &desc->txd;
-}
-
-static void ioat1_cleanup_event(unsigned long data)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat1_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
 dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
 	dma_addr_t phys_complete;
@@ -599,150 +182,6 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 	return true;
 }

-static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct list_head *_desc, *n;
-	struct dma_async_tx_descriptor *tx;
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
-		__func__, (unsigned long long) phys_complete);
-	list_for_each_safe(_desc, n, &ioat->used_desc) {
-		struct ioat_desc_sw *desc;
-
-		prefetch(n);
-		desc = list_entry(_desc, typeof(*desc), node);
-		tx = &desc->txd;
-		/*
-		 * Incoming DMA requests may use multiple descriptors,
-		 * due to exceeding xfercap, perhaps. If so, only the
-		 * last one will have a cookie, and require unmapping.
-		 */
-		dump_desc_dbg(ioat, desc);
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			ioat->active -= desc->hw->tx_cnt;
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys != phys_complete) {
-			/*
-			 * a completed entry, but not the last, so clean
-			 * up if the client is done with the descriptor
-			 */
-			if (async_tx_test_ack(tx))
-				list_move_tail(&desc->node, &ioat->free_desc);
-		} else {
-			/*
-			 * last used desc. Do not remove, so we can
-			 * append from it.
-			 */
-
-			/* if nothing else is pending, cancel the
-			 * completion timeout
-			 */
-			if (n == &ioat->used_desc) {
-				dev_dbg(to_dev(chan),
-					"%s cancel completion timeout\n",
-					__func__);
-				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-			}
-
-			/* TODO check status bits? */
-			break;
-		}
-	}
-
-	chan->last_completion = phys_complete;
-}
-
-/**
- * ioat1_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- *
- * To prevent lock contention we defer cleanup when the locks are
- * contended with a terminal timeout that forces cleanup and catches
- * completion notification errors.
- */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	prefetch(chan->completion);
-
-	if (!spin_trylock_bh(&chan->cleanup_lock))
-		return;
-
-	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
-
-	if (!spin_trylock_bh(&ioat->desc_lock)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
-
-	__cleanup(ioat, phys_complete);
-
-	spin_unlock_bh(&ioat->desc_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat1_timer_event(unsigned long data)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
-
-	spin_lock_bh(&chan->cleanup_lock);
-	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
-		struct ioat_desc_sw *desc;
-
-		spin_lock_bh(&ioat->desc_lock);
-
-		/* restart active descriptors */
-		desc = to_ioat_desc(ioat->used_desc.prev);
-		ioat_set_chainaddr(ioat, desc->txd.phys);
-		ioat_start(chan);
-
-		ioat->pending = 0;
-		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		spin_unlock_bh(&ioat->desc_lock);
-	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-
-		spin_lock_bh(&ioat->desc_lock);
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		if (ioat_cleanup_preamble(chan, &phys_complete))
-			__cleanup(ioat, phys_complete);
-		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
-			ioat1_reset_channel(ioat);
-		else {
-			u64 status = ioat_chansts(chan);
-
-			/* manually update the last completion address */
-			if (ioat_chansts_to_addr(status) != 0)
-				*chan->completion = status;
-
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&ioat->desc_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
 enum dma_status
 ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 		   struct dma_tx_state *txstate)
@@ -760,42 +199,6 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 	return dma_cookie_status(c, cookie, txstate);
 }

-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *desc;
-	struct ioat_dma_descriptor *hw;
-
-	spin_lock_bh(&ioat->desc_lock);
-
-	desc = ioat1_dma_get_next_descriptor(ioat);
-
-	if (!desc) {
-		dev_err(to_dev(chan),
-			"Unable to start null desc - get next desc failed\n");
-		spin_unlock_bh(&ioat->desc_lock);
-		return;
-	}
-
-	hw = desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = 1;
-	hw->ctl_f.compl_write = 1;
-	/* set size to non-zero value (channel returns error when size is 0) */
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	hw->src_addr = 0;
-	hw->dst_addr = 0;
-	async_tx_ack(&desc->txd);
-	hw->next = 0;
-	list_add_tail(&desc->node, &ioat->used_desc);
-	dump_desc_dbg(ioat, desc);
-
-	ioat_set_chainaddr(ioat, desc->txd.phys);
-	ioat_start(chan);
-	spin_unlock_bh(&ioat->desc_lock);
-}
-
 /*
  * Perform a IOAT transaction to verify the HW works.
  */
@@ -1077,36 +480,6 @@ int ioat_register(struct ioatdma_device *device)
 	return err;
 }

-/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
-static void ioat1_intr_quirk(struct ioatdma_device *device)
-{
-	struct pci_dev *pdev = device->pdev;
-	u32 dmactrl;
-
-	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
-	if (pdev->msi_enabled)
-		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-	else
-		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
-	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", ioat->desccount);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", ioat->active);
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
 static ssize_t cap_show(struct dma_chan *c, char *page)
 {
 	struct dma_device *dma = c->device;
@@ -1131,14 +504,6 @@ static ssize_t version_show(struct dma_chan *c, char *page)
 }
 struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

-static struct attribute *ioat1_attrs[] = {
-	&ring_size_attr.attr,
-	&ring_active_attr.attr,
-	&ioat_cap_attr.attr,
-	&ioat_version_attr.attr,
-	NULL,
-};
-
 static ssize_t
 ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
@@ -1157,11 +522,6 @@ const struct sysfs_ops ioat_sysfs_ops = {
 	.show = ioat_attr_show,
 };

-static struct kobj_type ioat1_ktype = {
-	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat1_attrs,
-};
-
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
 {
 	struct dma_device *dma = &device->common;
@@ -1197,38 +557,6 @@ void ioat_kobject_del(struct ioatdma_device *device)
 	}
 }

-int ioat1_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
-	int err;
-
-	device->intr_quirk = ioat1_intr_quirk;
-	device->enumerate_channels = ioat1_enumerate_channels;
-	device->self_test = ioat_dma_self_test;
-	device->timer_fn = ioat1_timer_event;
-	device->cleanup_fn = ioat1_cleanup_event;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
-	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-	dma->device_tx_status = ioat_dma_tx_status;
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-	err = ioat_register(device);
-	if (err)
-		return err;
-	ioat_kobject_add(device, &ioat1_ktype);
-
-	if (dca)
-		device->dca = ioat_dca_init(pdev, device->reg_base);
-
-	return err;
-}
-
 void ioat_dma_remove(struct ioatdma_device *device)
 {
 	struct dma_device *dma = &device->common;
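Note: the removed ioat1_dma_prep_memcpy() split each memcpy request into hardware descriptors of at most ioat->xfercap bytes, chaining them through hw->next. A standalone sketch of that chunking policy with simplified types (illustrative only, not part of the patch):

/* Hypothetical helper, not from the patch: how many descriptors a copy
 * of 'len' bytes needs when each descriptor moves at most 'xfercap'
 * bytes; the removed do/while always emitted at least one descriptor. */
static size_t example_ioat1_desc_count(size_t len, size_t xfercap)
{
	size_t n = 0;

	do {
		size_t copy = len < xfercap ? len : xfercap;

		len -= copy;
		n++;
	} while (len);

	return n;
}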
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 30f5c7eede16..1e96eaa29068 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -28,12 +28,9 @@

 #define IOAT_DMA_VERSION "4.00"

-#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
 #define IOAT_DMA_DCA_ANY_CPU ~0

 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
 #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
 #define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)

@@ -123,23 +120,6 @@ struct ioat_sysfs_entry {
 };

 /**
- * struct ioat_dma_chan - internal representation of a DMA channel
- */
-struct ioat_dma_chan {
-	struct ioat_chan_common base;
-
-	size_t xfercap;	/* XFERCAP register value expanded out */
-
-	spinlock_t desc_lock;
-	struct list_head free_desc;
-	struct list_head used_desc;
-
-	int pending;
-	u16 desccount;
-	u16 active;
-};
-
-/**
  * struct ioat_sed_ent - wrapper around super extended hardware descriptor
  * @hw: hardware SED
  * @sed_dma: dma address for the SED
@@ -158,34 +138,8 @@ static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
 	return container_of(c, struct ioat_chan_common, common);
 }

-static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-
-	return container_of(chan, struct ioat_dma_chan, base);
-}
-
 /* wrapper around hardware descriptor format + additional software fields */

-/**
- * struct ioat_desc_sw - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor (for memcpy)
- * @node: this descriptor will either be on the free list,
- *     or attached to a transaction list (tx_list)
- * @txd: the generic software descriptor for all engines
- * @id: identifier for debug
- */
-struct ioat_desc_sw {
-	struct ioat_dma_descriptor *hw;
-	struct list_head node;
-	size_t len;
-	struct list_head tx_list;
-	struct dma_async_tx_descriptor txd;
-#ifdef DEBUG
-	int id;
-#endif
-};
-
 #ifdef DEBUG
 #define set_desc_id(desc, i) ((desc)->id = (i))
 #define desc_id(desc) ((desc)->id)
@@ -253,13 +207,6 @@ static inline u64 ioat_chansts(struct ioat_chan_common *chan)
 #define ioat_chansts ioat_chansts_32
 #endif

-static inline void ioat_start(struct ioat_chan_common *chan)
-{
-	u8 ver = chan->device->version;
-
-	writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
-}
-
 static inline u64 ioat_chansts_to_addr(u64 status)
 {
 	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
@@ -293,16 +240,6 @@ static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
 	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
 }

-static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	writel(addr & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-}
-
 static inline bool is_ioat_active(unsigned long status)
 {
 	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
@@ -331,11 +268,9 @@ static inline bool is_ioat_bug(unsigned long err)

 int ioat_probe(struct ioatdma_device *device);
 int ioat_register(struct ioatdma_device *device);
-int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
 int ioat_dma_self_test(struct ioatdma_device *device);
 void ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
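Note: the removed ioat_set_chainaddr(), like the completion-address setup removed from dma.c ("doing 2 32bit writes to mmio since 1 64b write doesn't work"), programs a 64-bit DMA address as two 32-bit MMIO writes. A minimal sketch of that pattern with placeholder register pointers (illustrative only, not part of the patch):

/* Hypothetical helper, not from the patch: split a 64-bit address into
 * low/high 32-bit MMIO writes, as the removed code did. */
static inline void example_write_addr64(void __iomem *lo_reg,
					void __iomem *hi_reg, u64 addr)
{
	writel(lower_32_bits(addr), lo_reg);
	writel(upper_32_bits(addr), hi_reg);
}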
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 459873148bef..130db77120aa 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -210,9 +210,7 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_drvdata(pdev, device);

 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	if (device->version == IOAT_VER_1_2)
-		err = ioat1_dma_probe(device, ioat_dca_enabled);
-	else if (device->version == IOAT_VER_2_0)
+	if (device->version == IOAT_VER_2_0)
 		err = ioat2_dma_probe(device, ioat_dca_enabled);
 	else if (device->version >= IOAT_VER_3_0)
 		err = ioat3_dma_probe(device, ioat_dca_enabled);