path: root/drivers/dma
author	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:43:02 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:43:02 -0400
commit	e3232714d465c42ac631929b990f5e35e2d8a955 (patch)
tree	f6b9fe66bd11cdae420f558bebf7e8d4b89b52b4 /drivers/dma
parent	b265b11fc1a0bd6ae5a7fde12e374583a52ab326 (diff)
ioat3: segregate raid engines
The cleanup routine for the raid cases imposes extra checks for handling raid descriptors and extended descriptors. If the channel does not support raid, it can avoid this extra overhead by using the ioat2 cleanup path.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
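The patch boils down to a capability-based dispatch at probe time: the DMACAP register is read once, is_raid_device is set only when XOR or PQ support is advertised, and the cleanup/timer callbacks are chosen accordingly. A minimal standalone sketch of that pattern follows; the CAP_* bits, toy_dma_dev structure, and function names are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative capability bits -- stand-ins, not the real DMACAP layout. */
#define CAP_XOR (1u << 0)
#define CAP_PQ  (1u << 1)

/* Non-raid path: no raid/extended-descriptor checks in the hot loop. */
static void cleanup_fast(void) { puts("ioat2-style cleanup"); }
/* Raid path: pays for the extra descriptor handling. */
static void cleanup_raid(void) { puts("ioat3-style cleanup"); }

struct toy_dma_dev {
	void (*cleanup)(void);
};

/* Probe-time decision mirroring the patch: only channels that advertise
 * XOR or PQ get wired to the raid-aware cleanup routine. */
static void toy_probe(struct toy_dma_dev *dev, uint32_t cap)
{
	bool is_raid_device = (cap & (CAP_XOR | CAP_PQ)) != 0;

	dev->cleanup = is_raid_device ? cleanup_raid : cleanup_fast;
}

int main(void)
{
	struct toy_dma_dev plain = {0}, raid = {0};

	toy_probe(&plain, 0);
	toy_probe(&raid, CAP_XOR | CAP_PQ);
	plain.cleanup();	/* prints: ioat2-style cleanup */
	raid.cleanup();		/* prints: ioat3-style cleanup */
	return 0;
}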
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/ioat/dma_v2.c	4
-rw-r--r--	drivers/dma/ioat/dma_v2.h	2
-rw-r--r--	drivers/dma/ioat/dma_v3.c	25
3 files changed, 22 insertions, 9 deletions
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 12c64e1a7e31..7bbbd83d12e6 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -206,7 +206,7 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-static void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_tasklet(unsigned long data)
 {
 	struct ioat2_dma_chan *ioat = (void *) data;
 
@@ -258,7 +258,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 	__ioat2_restart_chan(ioat);
 }
 
-static void ioat2_timer_event(unsigned long data)
+void ioat2_timer_event(unsigned long data)
 {
 	struct ioat2_dma_chan *ioat = (void *) data;
 	struct ioat_chan_common *chan = &ioat->base;
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index e23027d3dcbd..246e646b1904 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -183,5 +183,7 @@ enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
 bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
+void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_timer_event(unsigned long data);
 extern struct kobj_type ioat2_ktype;
 #endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index ff4afdc8e59b..3686dddf6bff 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1117,30 +1117,25 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioat_chan_common *chan;
+	bool is_raid_device = false;
 	int err;
 	u16 dev_id;
 	u32 cap;
 
 	device->enumerate_channels = ioat2_enumerate_channels;
-	device->cleanup_tasklet = ioat3_cleanup_tasklet;
-	device->timer_fn = ioat3_timer_event;
 	device->self_test = ioat3_dma_self_test;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
 	dma->device_issue_pending = ioat2_issue_pending;
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_is_tx_complete = ioat3_is_complete;
 
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
-	if (cap & IOAT_CAP_FILL_BLOCK) {
-		dma_cap_set(DMA_MEMSET, dma->cap_mask);
-		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
-	}
 	if (cap & IOAT_CAP_XOR) {
+		is_raid_device = true;
 		dma->max_xor = 8;
 		dma->xor_align = 2;
 
@@ -1151,6 +1146,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
 	}
 	if (cap & IOAT_CAP_PQ) {
+		is_raid_device = true;
 		dma_set_maxpq(dma, 8, 0);
 		dma->pq_align = 2;
 
@@ -1171,6 +1167,21 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
 		}
 	}
+	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
+		dma_cap_set(DMA_MEMSET, dma->cap_mask);
+		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
+	}
+
+
+	if (is_raid_device) {
+		dma->device_is_tx_complete = ioat3_is_complete;
+		device->cleanup_tasklet = ioat3_cleanup_tasklet;
+		device->timer_fn = ioat3_timer_event;
+	} else {
+		dma->device_is_tx_complete = ioat2_is_complete;
+		device->cleanup_tasklet = ioat2_cleanup_tasklet;
+		device->timer_fn = ioat2_timer_event;
+	}
 
 	/* -= IOAT ver.3 workarounds =- */
 	/* Write CHANERRMSK_INT with 3E07h to mask out the errors