aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/ioat/dma.h
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2009-08-26 16:01:44 -0400
committerDan Williams <dan.j.williams@intel.com>2009-09-08 20:29:55 -0400
commit5cbafa65b92ee4f5b8ba915cddf94b91f186b989 (patch)
treef074c9dbcdedf05c5567a4e456a15120895363a6 /drivers/dma/ioat/dma.h
parentdcbc853af6f0c056088e4df0794d9bf36184809e (diff)
ioat2,3: convert to a true ring buffer
Replace the current linked list munged into a ring with a native ring buffer implementation. The benefit of this approach is reduced overhead as many parameters can be derived from ring position with simple pointer comparisons and descriptor allocation/freeing becomes just a manipulation of head/tail pointers. It requires a contiguous allocation for the software descriptor information. Since this arrangement is significantly different from the ioat1 chain, move ioat2,3 support into its own file and header. Common routines are exported from drivers/dma/ioat/dma.[ch]. Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/ioat/dma.h')
-rw-r--r--drivers/dma/ioat/dma.h50
1 file changed, 45 insertions, 5 deletions
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5b31db73ad8e..84065dfa4d40 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -62,6 +62,7 @@
62 * @idx: per channel data 62 * @idx: per channel data
63 * @dca: direct cache access context 63 * @dca: direct cache access context
64 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) 64 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
65 * @enumerate_channels: hw version specific channel enumeration
65 */ 66 */
66 67
67struct ioatdma_device { 68struct ioatdma_device {
@@ -76,6 +77,7 @@ struct ioatdma_device {
76 struct ioat_chan_common *idx[4]; 77 struct ioat_chan_common *idx[4];
77 struct dca_provider *dca; 78 struct dca_provider *dca;
78 void (*intr_quirk)(struct ioatdma_device *device); 79 void (*intr_quirk)(struct ioatdma_device *device);
80 int (*enumerate_channels)(struct ioatdma_device *device);
79}; 81};
80 82
81struct ioat_chan_common { 83struct ioat_chan_common {
@@ -106,6 +108,7 @@ struct ioat_chan_common {
106 struct tasklet_struct cleanup_task; 108 struct tasklet_struct cleanup_task;
107}; 109};
108 110
111
109/** 112/**
110 * struct ioat_dma_chan - internal representation of a DMA channel 113 * struct ioat_dma_chan - internal representation of a DMA channel
111 */ 114 */
@@ -119,7 +122,6 @@ struct ioat_dma_chan {
119 struct list_head used_desc; 122 struct list_head used_desc;
120 123
121 int pending; 124 int pending;
122 u16 dmacount;
123 u16 desccount; 125 u16 desccount;
124}; 126};
125 127
@@ -135,6 +137,33 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
135 return container_of(chan, struct ioat_dma_chan, base); 137 return container_of(chan, struct ioat_dma_chan, base);
136} 138}
137 139
140/**
141 * ioat_is_complete - poll the status of an ioat transaction
142 * @c: channel handle
143 * @cookie: transaction identifier
144 * @done: if set, updated with last completed transaction
145 * @used: if set, updated with last used transaction
146 */
147static inline enum dma_status
148ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
149 dma_cookie_t *done, dma_cookie_t *used)
150{
151 struct ioat_chan_common *chan = to_chan_common(c);
152 dma_cookie_t last_used;
153 dma_cookie_t last_complete;
154
155 last_used = c->cookie;
156 last_complete = chan->completed_cookie;
157 chan->watchdog_tcp_cookie = cookie;
158
159 if (done)
160 *done = last_complete;
161 if (used)
162 *used = last_used;
163
164 return dma_async_is_complete(cookie, last_complete, last_used);
165}
166
138/* wrapper around hardware descriptor format + additional software fields */ 167/* wrapper around hardware descriptor format + additional software fields */
139 168
140/** 169/**
@@ -162,11 +191,22 @@ static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
162 #endif 191 #endif
163} 192}
164 193
194static inline struct ioat_chan_common *
195ioat_chan_by_index(struct ioatdma_device *device, int index)
196{
197 return device->idx[index];
198}
199
200int ioat_probe(struct ioatdma_device *device);
201int ioat_register(struct ioatdma_device *device);
165int ioat1_dma_probe(struct ioatdma_device *dev, int dca); 202int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
166int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
167int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
168void ioat_dma_remove(struct ioatdma_device *device); 203void ioat_dma_remove(struct ioatdma_device *device);
169struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); 204struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
170struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 205unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
171struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); 206void ioat_init_channel(struct ioatdma_device *device,
207 struct ioat_chan_common *chan, int idx,
208 work_func_t work_fn, void (*tasklet)(unsigned long),
209 unsigned long tasklet_data);
210void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
211 size_t len, struct ioat_dma_descriptor *hw);
172#endif /* IOATDMA_H */ 212#endif /* IOATDMA_H */