commit 0b28330e39bbe0ffee4c56b09fc415fcec595ea3
author      Dan Williams <dan.j.williams@intel.com>    2010-05-17 19:30:58 -0400
committer   Dan Williams <dan.j.williams@intel.com>    2010-05-17 19:30:58 -0400
tree        fcf504879883763557e696eff81427b1ab78f76b /drivers/dma
parent      058276303dbc4ed089c1f7dad0871810b1f5ddf1
parent      caa20d974c86af496b419eef70010e63b7fab7ac
Merge branch 'ioat' into dmaengine
Diffstat (limited to 'drivers/dma')
 drivers/dma/at_hdmac.c      |   1
 drivers/dma/coh901318_lli.c |   1
 drivers/dma/dmaengine.c     |  17
 drivers/dma/dmatest.c       |   1
 drivers/dma/fsldma.c        |   1
 drivers/dma/ioat/dma.c      |   1
 drivers/dma/ioat/dma.h      |   1
 drivers/dma/ioat/dma_v2.c   | 185
 drivers/dma/ioat/dma_v2.h   |  33
 drivers/dma/ioat/dma_v3.c   | 124
 drivers/dma/ioat/pci.c      |   8
 drivers/dma/iop-adma.c      |   1
 drivers/dma/iovlock.c       |   1
 drivers/dma/mpc512x_dma.c   |   1
 drivers/dma/mv_xor.c        |   1
 drivers/dma/ppc4xx/adma.c   |   1
 drivers/dma/shdma.c         |   1
 17 files changed, 170 insertions(+), 209 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ee805a43f879..bd5250e8c00c 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 
 #include "at_hdmac_regs.h"
 
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 71d58c1a1e86..9f7e0e6a7eea 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -11,6 +11,7 @@
 #include <linux/spinlock.h>
 #include <linux/dmapool.h>
 #include <linux/memory.h>
+#include <linux/gfp.h>
 #include <mach/coh901318.h>
 
 #include "coh901318_lli.h"
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 21eb896f4dfd..9d31d5eb95c1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -58,6 +58,7 @@
 #include <linux/jiffies.h>
 #include <linux/rculist.h>
 #include <linux/idr.h>
+#include <linux/slab.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -975,7 +976,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                  struct dma_chan *chan)
 {
     tx->chan = chan;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
     spin_lock_init(&tx->lock);
+#endif
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
@@ -1008,7 +1011,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
  */
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-    struct dma_async_tx_descriptor *dep = tx->next;
+    struct dma_async_tx_descriptor *dep = txd_next(tx);
     struct dma_async_tx_descriptor *dep_next;
     struct dma_chan *chan;
 
@@ -1016,7 +1019,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
         return;
 
     /* we'll submit tx->next now, so clear the link */
-    tx->next = NULL;
+    txd_clear_next(tx);
     chan = dep->chan;
 
     /* keep submitting up until a channel switch is detected
@@ -1024,14 +1027,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
      * processing the interrupt from async_tx_channel_switch
      */
     for (; dep; dep = dep_next) {
-        spin_lock_bh(&dep->lock);
-        dep->parent = NULL;
-        dep_next = dep->next;
+        txd_lock(dep);
+        txd_clear_parent(dep);
+        dep_next = txd_next(dep);
         if (dep_next && dep_next->chan == chan)
-            dep->next = NULL; /* ->next will be submitted */
+            txd_clear_next(dep); /* ->next will be submitted */
         else
             dep_next = NULL; /* submit current dep and terminate */
-        spin_unlock_bh(&dep->lock);
+        txd_unlock(dep);
 
         dep->tx_submit(dep);
     }
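The txd_*() accessors used above arrive with this merge (note the CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH guard added to dma_async_tx_descriptor_init()): when channel switching is disabled, the lock/parent/next fields can be compiled out of struct dma_async_tx_descriptor entirely. A minimal sketch of the accessor pattern — the exact bodies in include/linux/dmaengine.h are an assumption here, not a quote:

#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
/* no channel switching: the fields do not exist, accessors are no-ops */
static inline void txd_lock(struct dma_async_tx_descriptor *txd) { }
static inline void txd_unlock(struct dma_async_tx_descriptor *txd) { }
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) { }
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) { }
static inline struct dma_async_tx_descriptor *
txd_next(struct dma_async_tx_descriptor *txd) { return NULL; }
#else
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{ spin_lock_bh(&txd->lock); }
static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{ spin_unlock_bh(&txd->lock); }
static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{ txd->parent = NULL; }
static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
{ txd->next = NULL; }
static inline struct dma_async_tx_descriptor *
txd_next(struct dma_async_tx_descriptor *txd) { return txd->next; }
#endif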
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 6fa55fe3dd24..68d58c414cf0 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <linux/wait.h>
 
 static unsigned int test_buf_size = 16384;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 005329d496bd..1fdf180cbd67 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 59cebbfc89ec..c9213ead4a26 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -27,6 +27,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 26f48ef94c5c..6d3a73b57e54 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -96,6 +96,7 @@ struct ioat_chan_common {
     #define IOAT_COMPLETION_ACK 1
     #define IOAT_RESET_PENDING 2
     #define IOAT_KOBJ_INIT_FAIL 3
+    #define IOAT_RESHAPE_PENDING 4
     struct timer_list timer;
     #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
     #define IDLE_TIMEOUT msecs_to_jiffies(2000)
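IOAT_RESHAPE_PENDING is consumed by ioat2_check_space_lock() in dma_v2.c below: it lets exactly one cpu grow the ring while every other producer retries the space check instead of queueing behind prep_lock during the reshape. A condensed sketch of that protocol, with logic lifted from the dma_v2.c hunk rather than quoted verbatim:

retry:
    spin_lock_bh(&ioat->prep_lock);
    if (ioat2_ring_space(ioat) > num_descs)
        return 0;                       /* success, prep_lock stays held */
    if (test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state)) {
        spin_unlock_bh(&ioat->prep_lock);
        goto retry;                     /* another cpu owns the reshape */
    }
    spin_unlock_bh(&ioat->prep_lock);
    /* sole owner: take both locks in cleanup -> prep order and grow */
    spin_lock_bh(&chan->cleanup_lock);
    spin_lock_bh(&ioat->prep_lock);
    expanded = reshape_ring(ioat, ioat->alloc_order + 1);
    clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
    spin_unlock_bh(&ioat->prep_lock);
    spin_unlock_bh(&chan->cleanup_lock);
    if (expanded)
        goto retry;                     /* ring grew, re-check space */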
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index f540e0be7f31..3c8b32a83794 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -27,6 +27,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
@@ -55,8 +56,6 @@ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
 
     ioat->dmacount += ioat2_ring_pending(ioat);
     ioat->issued = ioat->head;
-    /* make descriptor updates globally visible before notifying channel */
-    wmb();
     writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
     dev_dbg(to_dev(chan),
         "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
@@ -68,9 +67,9 @@ void ioat2_issue_pending(struct dma_chan *c)
     struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 
     if (ioat2_ring_pending(ioat)) {
-        spin_lock_bh(&ioat->ring_lock);
+        spin_lock_bh(&ioat->prep_lock);
         __ioat2_issue_pending(ioat);
-        spin_unlock_bh(&ioat->ring_lock);
+        spin_unlock_bh(&ioat->prep_lock);
     }
 }
 
@@ -79,7 +78,7 @@ void ioat2_issue_pending(struct dma_chan *c)
  * @ioat: ioat2+ channel
  *
  * Check if the number of unsubmitted descriptors has exceeded the
- * watermark.  Called with ring_lock held
+ * watermark.  Called with prep_lock held
  */
 static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
 {
@@ -91,7 +90,6 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 {
     struct ioat_ring_ent *desc;
     struct ioat_dma_descriptor *hw;
-    int idx;
 
     if (ioat2_ring_space(ioat) < 1) {
         dev_err(to_dev(&ioat->base),
@@ -101,8 +99,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 
     dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
         __func__, ioat->head, ioat->tail, ioat->issued);
-    idx = ioat2_desc_alloc(ioat, 1);
-    desc = ioat2_get_ring_ent(ioat, idx);
+    desc = ioat2_get_ring_ent(ioat, ioat->head);
 
     hw = desc->hw;
     hw->ctl = 0;
@@ -116,14 +113,16 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
     async_tx_ack(&desc->txd);
     ioat2_set_chainaddr(ioat, desc->txd.phys);
     dump_desc_dbg(ioat, desc);
+    wmb();
+    ioat->head += 1;
     __ioat2_issue_pending(ioat);
 }
 
 static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 {
-    spin_lock_bh(&ioat->ring_lock);
+    spin_lock_bh(&ioat->prep_lock);
     __ioat2_start_null_desc(ioat);
-    spin_unlock_bh(&ioat->ring_lock);
+    spin_unlock_bh(&ioat->prep_lock);
 }
 
 static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
@@ -133,15 +132,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
     struct ioat_ring_ent *desc;
     bool seen_current = false;
     u16 active;
-    int i;
+    int idx = ioat->tail, i;
 
     dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
         __func__, ioat->head, ioat->tail, ioat->issued);
 
     active = ioat2_ring_active(ioat);
     for (i = 0; i < active && !seen_current; i++) {
-        prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
-        desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+        smp_read_barrier_depends();
+        prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
+        desc = ioat2_get_ring_ent(ioat, idx + i);
         tx = &desc->txd;
         dump_desc_dbg(ioat, desc);
         if (tx->cookie) {
@@ -157,11 +157,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
         if (tx->phys == phys_complete)
             seen_current = true;
     }
-    ioat->tail += i;
+    smp_mb(); /* finish all descriptor reads before incrementing tail */
+    ioat->tail = idx + i;
     BUG_ON(active && !seen_current); /* no active descs have written a completion? */
 
     chan->last_completion = phys_complete;
-    if (ioat->head == ioat->tail) {
+    if (active - i == 0) {
         dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
             __func__);
         clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
@@ -178,24 +179,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
     struct ioat_chan_common *chan = &ioat->base;
     unsigned long phys_complete;
 
-    prefetch(chan->completion);
-
-    if (!spin_trylock_bh(&chan->cleanup_lock))
-        return;
-
-    if (!ioat_cleanup_preamble(chan, &phys_complete)) {
-        spin_unlock_bh(&chan->cleanup_lock);
-        return;
-    }
-
-    if (!spin_trylock_bh(&ioat->ring_lock)) {
-        spin_unlock_bh(&chan->cleanup_lock);
-        return;
-    }
-
-    __cleanup(ioat, phys_complete);
-
-    spin_unlock_bh(&ioat->ring_lock);
+    spin_lock_bh(&chan->cleanup_lock);
+    if (ioat_cleanup_preamble(chan, &phys_complete))
+        __cleanup(ioat, phys_complete);
     spin_unlock_bh(&chan->cleanup_lock);
 }
 
@@ -286,12 +272,10 @@ void ioat2_timer_event(unsigned long data)
     struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
     struct ioat_chan_common *chan = &ioat->base;
 
-    spin_lock_bh(&chan->cleanup_lock);
     if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
         unsigned long phys_complete;
         u64 status;
 
-        spin_lock_bh(&ioat->ring_lock);
         status = ioat_chansts(chan);
 
         /* when halted due to errors check for channel
@@ -310,26 +294,31 @@ void ioat2_timer_event(unsigned long data)
          * acknowledged a pending completion once, then be more
          * forceful with a restart
          */
-        if (ioat_cleanup_preamble(chan, &phys_complete))
+        spin_lock_bh(&chan->cleanup_lock);
+        if (ioat_cleanup_preamble(chan, &phys_complete)) {
             __cleanup(ioat, phys_complete);
-        else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+        } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+            spin_lock_bh(&ioat->prep_lock);
             ioat2_restart_channel(ioat);
-        else {
+            spin_unlock_bh(&ioat->prep_lock);
+        } else {
             set_bit(IOAT_COMPLETION_ACK, &chan->state);
             mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
         }
-        spin_unlock_bh(&ioat->ring_lock);
+        spin_unlock_bh(&chan->cleanup_lock);
     } else {
         u16 active;
 
         /* if the ring is idle, empty, and oversized try to step
          * down the size
          */
-        spin_lock_bh(&ioat->ring_lock);
+        spin_lock_bh(&chan->cleanup_lock);
+        spin_lock_bh(&ioat->prep_lock);
         active = ioat2_ring_active(ioat);
         if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
             reshape_ring(ioat, ioat->alloc_order-1);
-        spin_unlock_bh(&ioat->ring_lock);
+        spin_unlock_bh(&ioat->prep_lock);
+        spin_unlock_bh(&chan->cleanup_lock);
 
         /* keep shrinking until we get back to our minimum
          * default size
@@ -337,7 +326,6 @@ void ioat2_timer_event(unsigned long data)
         if (ioat->alloc_order > ioat_get_alloc_order())
             mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
     }
-    spin_unlock_bh(&chan->cleanup_lock);
 }
 
 static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -391,7 +379,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
 
         ioat_init_channel(device, &ioat->base, i);
         ioat->xfercap_log = xfercap_log;
-        spin_lock_init(&ioat->ring_lock);
+        spin_lock_init(&ioat->prep_lock);
         if (device->reset_hw(&ioat->base)) {
             i = 0;
             break;
@@ -417,8 +405,17 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
 
     if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
         mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+    /* make descriptor updates visible before advancing ioat->head,
+     * this is purposefully not smp_wmb() since we are also
+     * publishing the descriptor updates to a dma device
+     */
+    wmb();
+
+    ioat->head += ioat->produce;
+
     ioat2_update_pending(ioat);
-    spin_unlock_bh(&ioat->ring_lock);
+    spin_unlock_bh(&ioat->prep_lock);
 
     return cookie;
 }
@@ -530,13 +527,15 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
     if (!ring)
         return -ENOMEM;
 
-    spin_lock_bh(&ioat->ring_lock);
+    spin_lock_bh(&chan->cleanup_lock);
+    spin_lock_bh(&ioat->prep_lock);
     ioat->ring = ring;
     ioat->head = 0;
     ioat->issued = 0;
     ioat->tail = 0;
     ioat->alloc_order = order;
-    spin_unlock_bh(&ioat->ring_lock);
+    spin_unlock_bh(&ioat->prep_lock);
+    spin_unlock_bh(&chan->cleanup_lock);
 
     tasklet_enable(&chan->cleanup_task);
     ioat2_start_null_desc(ioat);
@@ -552,7 +551,7 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
      */
     struct ioat_chan_common *chan = &ioat->base;
     struct dma_chan *c = &chan->common;
-    const u16 curr_size = ioat2_ring_mask(ioat) + 1;
+    const u16 curr_size = ioat2_ring_size(ioat);
     const u16 active = ioat2_ring_active(ioat);
     const u16 new_size = 1 << order;
     struct ioat_ring_ent **ring;
@@ -652,54 +651,61 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
 }
 
 /**
- * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
- * @idx: gets starting descriptor index on successful allocation
+ * ioat2_check_space_lock - verify space and grab ring producer lock
  * @ioat: ioat2,3 channel (ring) to operate on
  * @num_descs: allocation length
  */
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
 {
     struct ioat_chan_common *chan = &ioat->base;
+    bool retry;
 
-    spin_lock_bh(&ioat->ring_lock);
+ retry:
+    spin_lock_bh(&ioat->prep_lock);
     /* never allow the last descriptor to be consumed, we need at
      * least one free at all times to allow for on-the-fly ring
      * resizing.
      */
-    while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
-        if (reshape_ring(ioat, ioat->alloc_order + 1) &&
-            ioat2_ring_space(ioat) > num_descs)
-            break;
-
-        if (printk_ratelimit())
-            dev_dbg(to_dev(chan),
-                "%s: ring full! num_descs: %d (%x:%x:%x)\n",
-                __func__, num_descs, ioat->head, ioat->tail,
-                ioat->issued);
-        spin_unlock_bh(&ioat->ring_lock);
-
-        /* progress reclaim in the allocation failure case we
-         * may be called under bh_disabled so we need to trigger
-         * the timer event directly
-         */
-        spin_lock_bh(&chan->cleanup_lock);
-        if (jiffies > chan->timer.expires &&
-            timer_pending(&chan->timer)) {
-            struct ioatdma_device *device = chan->device;
-
-            mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-            spin_unlock_bh(&chan->cleanup_lock);
-            device->timer_fn((unsigned long) &chan->common);
-        } else
-            spin_unlock_bh(&chan->cleanup_lock);
-        return -ENOMEM;
+    if (likely(ioat2_ring_space(ioat) > num_descs)) {
+        dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
+            __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+        ioat->produce = num_descs;
+        return 0;  /* with ioat->prep_lock held */
     }
+    retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
+    spin_unlock_bh(&ioat->prep_lock);
 
-    dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
-        __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+    /* is another cpu already trying to expand the ring? */
+    if (retry)
+        goto retry;
 
-    *idx = ioat2_desc_alloc(ioat, num_descs);
-    return 0; /* with ioat->ring_lock held */
+    spin_lock_bh(&chan->cleanup_lock);
+    spin_lock_bh(&ioat->prep_lock);
+    retry = reshape_ring(ioat, ioat->alloc_order + 1);
+    clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
+    spin_unlock_bh(&ioat->prep_lock);
+    spin_unlock_bh(&chan->cleanup_lock);
+
+    /* if we were able to expand the ring retry the allocation */
+    if (retry)
+        goto retry;
+
+    if (printk_ratelimit())
+        dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+            __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+
+    /* progress reclaim in the allocation failure case we may be
+     * called under bh_disabled so we need to trigger the timer
+     * event directly
+     */
+    if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+        struct ioatdma_device *device = chan->device;
+
+        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+        device->timer_fn((unsigned long) &chan->common);
+    }
+
+    return -ENOMEM;
 }
 
 struct dma_async_tx_descriptor *
@@ -712,14 +718,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
     dma_addr_t dst = dma_dest;
     dma_addr_t src = dma_src;
     size_t total_len = len;
-    int num_descs;
-    u16 idx;
-    int i;
+    int num_descs, idx, i;
 
     num_descs = ioat2_xferlen_to_descs(ioat, len);
-    if (likely(num_descs) &&
-        ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
-        /* pass */;
+    if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
+        idx = ioat->head;
     else
         return NULL;
     i = 0;
@@ -776,7 +779,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
     device->cleanup_fn((unsigned long) c);
     device->reset_hw(chan);
 
-    spin_lock_bh(&ioat->ring_lock);
+    spin_lock_bh(&chan->cleanup_lock);
+    spin_lock_bh(&ioat->prep_lock);
     descs = ioat2_ring_space(ioat);
     dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
     for (i = 0; i < descs; i++) {
@@ -799,7 +803,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
     ioat->alloc_order = 0;
     pci_pool_free(device->completion_pool, chan->completion,
               chan->completion_dma);
-    spin_unlock_bh(&ioat->ring_lock);
+    spin_unlock_bh(&ioat->prep_lock);
+    spin_unlock_bh(&chan->cleanup_lock);
 
     chan->last_completion = 0;
     chan->completion_dma = 0;
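Net effect of the dma_v2.c changes: the single ring_lock becomes a producer-only prep_lock alongside the existing cleanup_lock, and ioat->head is no longer advanced at allocation time but at submit time, after a full wmb(). A condensed sketch of the resulting produce/submit sequence — write_descriptors() is a hypothetical stand-in for the real prep routines:

if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) {
    int idx = ioat->head;     /* space reserved; prep_lock still held */

    write_descriptors(ioat, idx, num_descs);  /* hypothetical helper */

    /* tx_submit path: publish to the dma device and to cleanup */
    wmb();                          /* not smp_wmb(): hw is an observer too */
    ioat->head += ioat->produce;    /* now visible to ioat2_ring_active() */
    ioat2_update_pending(ioat);
    spin_unlock_bh(&ioat->prep_lock);
}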
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index ef2871fd7868..a2c413b2b8d8 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -22,6 +22,7 @@
 #define IOATDMA_V2_H
 
 #include <linux/dmaengine.h>
+#include <linux/circ_buf.h>
 #include "dma.h"
 #include "hw.h"
 
@@ -49,8 +50,9 @@ extern int ioat_ring_alloc_order;
  * @tail: cleanup index
  * @dmacount: identical to 'head' except for occasionally resetting to zero
  * @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
  * @ring: software ring buffer implementation of hardware ring
- * @ring_lock: protects ring attributes
+ * @prep_lock: serializes descriptor preparation (producers)
  */
 struct ioat2_dma_chan {
     struct ioat_chan_common base;
@@ -60,8 +62,9 @@ struct ioat2_dma_chan {
     u16 tail;
     u16 dmacount;
     u16 alloc_order;
+    u16 produce;
     struct ioat_ring_ent **ring;
-    spinlock_t ring_lock;
+    spinlock_t prep_lock;
 };
 
 static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
@@ -71,38 +74,26 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
     return container_of(chan, struct ioat2_dma_chan, base);
 }
 
-static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
+static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
 {
-    return (1 << ioat->alloc_order) - 1;
+    return 1 << ioat->alloc_order;
 }
 
 /* count of descriptors in flight with the engine */
 static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
 {
-    return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
+    return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
 }
 
 /* count of descriptors pending submission to hardware */
 static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
 {
-    return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
+    return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
 }
 
 static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
 {
-    u16 num_descs = ioat2_ring_mask(ioat) + 1;
-    u16 active = ioat2_ring_active(ioat);
-
-    BUG_ON(active > num_descs);
-
-    return num_descs - active;
-}
-
-/* assumes caller already checked space */
-static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
-{
-    ioat->head += len;
-    return ioat->head - len;
+    return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
 }
 
 static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
@@ -151,7 +142,7 @@ struct ioat_ring_ent {
 static inline struct ioat_ring_ent *
 ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
 {
-    return ioat->ring[idx & ioat2_ring_mask(ioat)];
+    return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
 }
 
 static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
@@ -168,7 +159,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
 int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
 struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
 int ioat2_enumerate_channels(struct ioatdma_device *device);
 struct dma_async_tx_descriptor *
 ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
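CIRC_CNT() from <linux/circ_buf.h> is the same power-of-two mask arithmetic the driver previously open-coded via ioat2_ring_mask(); for example:

#include <linux/circ_buf.h>

/* CIRC_CNT(head, tail, size) is ((head) - (tail)) & ((size) - 1), so the
 * count stays correct across u16 index wraparound.  With a 16-entry ring,
 * tail == 0xfffe and head wrapped around to 0x0003 is still 5 in-flight
 * entries:
 */
u16 active = CIRC_CNT(0x0003, 0xfffe, 16);  /* == 5 */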
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d1adbf35268c..1cdd22e1051b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -57,6 +57,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/gfp.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include "registers.h"
@@ -259,8 +260,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
     struct ioat_chan_common *chan = &ioat->base;
     struct ioat_ring_ent *desc;
     bool seen_current = false;
+    int idx = ioat->tail, i;
     u16 active;
-    int i;
 
     dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
         __func__, ioat->head, ioat->tail, ioat->issued);
@@ -269,13 +270,14 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
     for (i = 0; i < active && !seen_current; i++) {
         struct dma_async_tx_descriptor *tx;
 
-        prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
-        desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+        smp_read_barrier_depends();
+        prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
+        desc = ioat2_get_ring_ent(ioat, idx + i);
         dump_desc_dbg(ioat, desc);
         tx = &desc->txd;
         if (tx->cookie) {
             chan->completed_cookie = tx->cookie;
-            ioat3_dma_unmap(ioat, desc, ioat->tail + i);
+            ioat3_dma_unmap(ioat, desc, idx + i);
             tx->cookie = 0;
             if (tx->callback) {
                 tx->callback(tx->callback_param);
@@ -292,69 +294,30 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
             i++;
         }
     }
-    ioat->tail += i;
+    smp_mb(); /* finish all descriptor reads before incrementing tail */
+    ioat->tail = idx + i;
     BUG_ON(active && !seen_current); /* no active descs have written a completion? */
     chan->last_completion = phys_complete;
 
-    active = ioat2_ring_active(ioat);
-    if (active == 0) {
+    if (active - i == 0) {
         dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
             __func__);
         clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
         mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
     }
     /* 5 microsecond delay per pending descriptor */
-    writew(min((5 * active), IOAT_INTRDELAY_MASK),
+    writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
            chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
 }
 
-/* try to cleanup, but yield (via spin_trylock) to incoming submissions
- * with the expectation that we will immediately poll again shortly
- */
-static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
+static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
     struct ioat_chan_common *chan = &ioat->base;
     unsigned long phys_complete;
 
-    prefetch(chan->completion);
-
-    if (!spin_trylock_bh(&chan->cleanup_lock))
-        return;
-
-    if (!ioat_cleanup_preamble(chan, &phys_complete)) {
-        spin_unlock_bh(&chan->cleanup_lock);
-        return;
-    }
-
-    if (!spin_trylock_bh(&ioat->ring_lock)) {
-        spin_unlock_bh(&chan->cleanup_lock);
-        return;
-    }
-
-    __cleanup(ioat, phys_complete);
-
-    spin_unlock_bh(&ioat->ring_lock);
-    spin_unlock_bh(&chan->cleanup_lock);
-}
-
-/* run cleanup now because we already delayed the interrupt via INTRDELAY */
-static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
-{
-    struct ioat_chan_common *chan = &ioat->base;
-    unsigned long phys_complete;
-
-    prefetch(chan->completion);
-
     spin_lock_bh(&chan->cleanup_lock);
-    if (!ioat_cleanup_preamble(chan, &phys_complete)) {
-        spin_unlock_bh(&chan->cleanup_lock);
-        return;
-    }
-    spin_lock_bh(&ioat->ring_lock);
-
-    __cleanup(ioat, phys_complete);
-
-    spin_unlock_bh(&ioat->ring_lock);
+    if (ioat_cleanup_preamble(chan, &phys_complete))
+        __cleanup(ioat, phys_complete);
     spin_unlock_bh(&chan->cleanup_lock);
 }
 
@@ -362,7 +325,7 @@ static void ioat3_cleanup_event(unsigned long data)
 {
     struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 
-    ioat3_cleanup_sync(ioat);
+    ioat3_cleanup(ioat);
     writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
@@ -383,12 +346,10 @@ static void ioat3_timer_event(unsigned long data)
     struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
     struct ioat_chan_common *chan = &ioat->base;
 
-    spin_lock_bh(&chan->cleanup_lock);
     if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
         unsigned long phys_complete;
         u64 status;
 
-        spin_lock_bh(&ioat->ring_lock);
         status = ioat_chansts(chan);
 
         /* when halted due to errors check for channel
@@ -407,26 +368,31 @@ static void ioat3_timer_event(unsigned long data)
          * acknowledged a pending completion once, then be more
          * forceful with a restart
          */
+        spin_lock_bh(&chan->cleanup_lock);
         if (ioat_cleanup_preamble(chan, &phys_complete))
             __cleanup(ioat, phys_complete);
-        else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+        else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+            spin_lock_bh(&ioat->prep_lock);
             ioat3_restart_channel(ioat);
-        else {
+            spin_unlock_bh(&ioat->prep_lock);
+        } else {
             set_bit(IOAT_COMPLETION_ACK, &chan->state);
             mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
         }
-        spin_unlock_bh(&ioat->ring_lock);
+        spin_unlock_bh(&chan->cleanup_lock);
     } else {
         u16 active;
 
         /* if the ring is idle, empty, and oversized try to step
         * down the size
         */
-        spin_lock_bh(&ioat->ring_lock);
+        spin_lock_bh(&chan->cleanup_lock);
+        spin_lock_bh(&ioat->prep_lock);
        active = ioat2_ring_active(ioat);
        if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
            reshape_ring(ioat, ioat->alloc_order-1);
-        spin_unlock_bh(&ioat->ring_lock);
+        spin_unlock_bh(&ioat->prep_lock);
+        spin_unlock_bh(&chan->cleanup_lock);
 
         /* keep shrinking until we get back to our minimum
          * default size
@@ -434,7 +400,6 @@ static void ioat3_timer_event(unsigned long data)
         if (ioat->alloc_order > ioat_get_alloc_order())
             mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
     }
-    spin_unlock_bh(&chan->cleanup_lock);
 }
 
 static enum dma_status
@@ -446,7 +411,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
     if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
         return DMA_SUCCESS;
 
-    ioat3_cleanup_poll(ioat);
+    ioat3_cleanup(ioat);
 
     return ioat_tx_status(c, cookie, txstate);
 }
@@ -459,15 +424,12 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
     struct ioat_ring_ent *desc;
     size_t total_len = len;
     struct ioat_fill_descriptor *fill;
-    int num_descs;
     u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
-    u16 idx;
-    int i;
+    int num_descs, idx, i;
 
     num_descs = ioat2_xferlen_to_descs(ioat, len);
-    if (likely(num_descs) &&
-        ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
-        /* pass */;
+    if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
+        idx = ioat->head;
     else
         return NULL;
     i = 0;
@@ -512,11 +474,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
     struct ioat_xor_descriptor *xor;
     struct ioat_xor_ext_descriptor *xor_ex = NULL;
     struct ioat_dma_descriptor *hw;
+    int num_descs, with_ext, idx, i;
     u32 offset = 0;
-    int num_descs;
-    int with_ext;
-    int i;
-    u16 idx;
     u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
 
     BUG_ON(src_cnt < 2);
@@ -536,9 +495,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
      * (legacy) descriptor to ensure all completion writes arrive in
      * order.
      */
-    if (likely(num_descs) &&
-        ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
-        /* pass */;
+    if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
+        idx = ioat->head;
     else
         return NULL;
     i = 0;
@@ -656,11 +614,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
     struct ioat_pq_ext_descriptor *pq_ex = NULL;
     struct ioat_dma_descriptor *hw;
     u32 offset = 0;
-    int num_descs;
-    int with_ext;
-    int i, s;
-    u16 idx;
     u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+    int i, s, idx, with_ext, num_descs;
 
     dev_dbg(to_dev(chan), "%s\n", __func__);
     /* the engine requires at least two sources (we provide
@@ -686,8 +641,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
      * order.
      */
     if (likely(num_descs) &&
-        ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
-        /* pass */;
+        ioat2_check_space_lock(ioat, num_descs+1) == 0)
+        idx = ioat->head;
     else
         return NULL;
     i = 0;
@@ -850,10 +805,9 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
     struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
     struct ioat_ring_ent *desc;
     struct ioat_dma_descriptor *hw;
-    u16 idx;
 
-    if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0)
-        desc = ioat2_get_ring_ent(ioat, idx);
+    if (ioat2_check_space_lock(ioat, 1) == 0)
+        desc = ioat2_get_ring_ent(ioat, ioat->head);
     else
         return NULL;
 
@@ -1221,7 +1175,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
     if (cap & IOAT_CAP_XOR) {
         is_raid_device = true;
         dma->max_xor = 8;
-        dma->xor_align = 2;
+        dma->xor_align = 6;
 
         dma_cap_set(DMA_XOR, dma->cap_mask);
         dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1232,7 +1186,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
     if (cap & IOAT_CAP_PQ) {
         is_raid_device = true;
         dma_set_maxpq(dma, 8, 0);
-        dma->pq_align = 2;
+        dma->pq_align = 6;
 
         dma_cap_set(DMA_PQ, dma->cap_mask);
         dma->device_prep_dma_pq = ioat3_prep_pq;
@@ -1242,7 +1196,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
         if (!(cap & IOAT_CAP_XOR)) {
             dma->max_xor = 8;
-            dma->xor_align = 2;
+            dma->xor_align = 6;
 
             dma_cap_set(DMA_XOR, dma->cap_mask);
             dma->device_prep_dma_xor = ioat3_prep_pqxor;
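The xor_align/pq_align bump from 2 to 6 matters because struct dma_device stores these fields as alignment shifts: the requirement goes from 4-byte to 64-byte buffers for the raid operations. A minimal sketch of the mask test implied by a shift-encoded alignment — the helper name here is illustrative, not the dmaengine API:

#include <linux/types.h>

/* align_shift is log2 of the required byte alignment */
static bool ioat_buf_aligned(unsigned int align_shift, size_t off, size_t len)
{
    size_t mask = ((size_t)1 << align_shift) - 1;

    return ((off | len) & mask) == 0;  /* offset and length both aligned */
}

/* ioat_buf_aligned(2, ...) passes 4-byte-aligned buffers;
 * ioat_buf_aligned(6, ...) only passes 64-byte-aligned ones.
 */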
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index d545fae30f37..fab37d1cf48d 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -30,6 +30,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/dca.h>
+#include <linux/slab.h>
 #include "dma.h"
 #include "dma_v2.h"
 #include "registers.h"
@@ -137,15 +138,10 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
     if (err)
         return err;
 
-    device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
-    if (!device)
-        return -ENOMEM;
-
-    pci_set_master(pdev);
-
     device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
     if (!device)
         return -ENOMEM;
+    pci_set_master(pdev);
     pci_set_drvdata(pdev, device);
 
     device->version = readb(device->reg_base + IOAT_VER_OFFSET);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e5d4b97b7fd5..161c452923b8 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -32,6 +32,7 @@
 #include <linux/memory.h>
 #include <linux/ioport.h>
 #include <linux/raid/pq.h>
+#include <linux/slab.h>
 
 #include <mach/adma.h>
 
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index c0a272c73682..bb48a57c2fc1 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -27,6 +27,7 @@
 
 #include <linux/dmaengine.h>
 #include <linux/pagemap.h>
+#include <linux/slab.h>
 #include <net/tcp.h> /* for memcpy_toiovec */
 #include <asm/io.h>
 #include <asm/uaccess.h>
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index e41fb8b671d3..201e6e19c344 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -37,6 +37,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 4b8c1fcc834d..86c5ae9fde34 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -18,6 +18,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 5558419876e8..c6079fcca13f 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
 #include <linux/of.h>
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index e9de1d35c20d..544340d7a464 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -19,6 +19,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>