| author | Dan Williams <dan.j.williams@intel.com> | 2009-01-06 13:38:14 -0500 |
|---|---|---|
| committer | Dan Williams <dan.j.williams@intel.com> | 2009-01-06 13:38:14 -0500 |
| commit | bec085134e446577a983f17f57d642a88d1af53b | |
| tree | 7d29afc53fedc72349ee78112fb71f68ff48ce24 /crypto | |
| parent | 6f49a57aa5a0c6d4e4e27c85f7af6c83325a12d1 | |
dmaengine: centralize channel allocation, introduce dma_find_channel
Allowing multiple clients to each define their own channel allocation
scheme quickly leads to a pathological situation. For memory-to-memory
offload, all clients can share a central allocator.
This simply moves the existing async_tx allocator to dmaengine with
minimal fixups:
* async_tx.c:get_chan_ref_by_cap --> dmaengine.c:nth_chan
* async_tx.c:async_tx_rebalance --> dmaengine.c:dma_channel_rebalance
* split out common code from async_tx.c:__async_tx_find_channel -->
dma_find_channel
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
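
As a rough illustration of the client side after this change (not part of the patch; the helper name below is hypothetical), a memory-to-memory offload user no longer maintains its own channel bookkeeping and instead asks dmaengine for a channel by capability:

```c
#include <linux/dmaengine.h>

/*
 * Hypothetical client-side sketch: with allocation centralized in
 * dmaengine, a mem-to-mem offload client simply asks for a channel by
 * capability.  dma_find_channel() hands back the channel that the
 * rebalance logic assigned to the current CPU, or NULL if no channel
 * with that capability has been registered.
 */
static struct dma_chan *example_get_copy_chan(void)
{
	return dma_find_channel(DMA_MEMCPY);
}
```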
Diffstat (limited to 'crypto')
| -rw-r--r-- | crypto/async_tx/async_tx.c | 146 |
|---|---|---|

1 file changed, 4 insertions(+), 142 deletions(-)
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 43fe4cbe71e6..b88bb1f608fc 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -38,25 +38,10 @@ static struct dma_client async_tx_dma = {
 };
 
 /**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * chan_ref_percpu - tracks channel allocations per core/opertion
- */
-struct chan_ref_percpu {
-	struct dma_chan_ref *ref;
-};
-
-static int channel_table_initialized;
-static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
-
-/**
  * async_tx_lock - protect modification of async_tx_master_list and serialize
  *	rebalance operations
  */
-static spinlock_t async_tx_lock;
+static DEFINE_SPINLOCK(async_tx_lock);
 
 static LIST_HEAD(async_tx_master_list);
 
@@ -89,85 +74,6 @@ init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
 	atomic_set(&ref->count, 0);
 }
 
-/**
- * get_chan_ref_by_cap - returns the nth channel of the given capability
- *	defaults to returning the channel with the desired capability and the
- *	lowest reference count if the index can not be satisfied
- * @cap: capability to match
- * @index: nth channel desired, passing -1 has the effect of forcing the
- *  default return value
- */
-static struct dma_chan_ref *
-get_chan_ref_by_cap(enum dma_transaction_type cap, int index)
-{
-	struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
-			if (!min_ref)
-				min_ref = ref;
-			else if (atomic_read(&ref->count) <
-				atomic_read(&min_ref->count))
-				min_ref = ref;
-
-			if (index-- == 0) {
-				ret_ref = ref;
-				break;
-			}
-		}
-	rcu_read_unlock();
-
-	if (!ret_ref)
-		ret_ref = min_ref;
-
-	if (ret_ref)
-		atomic_inc(&ret_ref->count);
-
-	return ret_ref;
-}
-
-/**
- * async_tx_rebalance - redistribute the available channels, optimize
- * for cpu isolation in the SMP case, and opertaion isolation in the
- * uniprocessor case
- */
-static void async_tx_rebalance(void)
-{
-	int cpu, cap, cpu_idx = 0;
-	unsigned long flags;
-
-	if (!channel_table_initialized)
-		return;
-
-	spin_lock_irqsave(&async_tx_lock, flags);
-
-	/* undo the last distribution */
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_possible_cpu(cpu) {
-			struct dma_chan_ref *ref =
-				per_cpu_ptr(channel_table[cap], cpu)->ref;
-			if (ref) {
-				atomic_set(&ref->count, 0);
-				per_cpu_ptr(channel_table[cap], cpu)->ref =
-									NULL;
-			}
-		}
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_online_cpu(cpu) {
-			struct dma_chan_ref *new;
-			if (NR_CPUS > 1)
-				new = get_chan_ref_by_cap(cap, cpu_idx++);
-			else
-				new = get_chan_ref_by_cap(cap, -1);
-
-			per_cpu_ptr(channel_table[cap], cpu)->ref = new;
-		}
-
-	spin_unlock_irqrestore(&async_tx_lock, flags);
-}
-
 static enum dma_state_client
 dma_channel_add_remove(struct dma_client *client,
 			struct dma_chan *chan, enum dma_state state)
@@ -211,8 +117,6 @@ dma_channel_add_remove(struct dma_client *client,
 				" (-ENOMEM)\n");
 			return 0;
 		}
-
-		async_tx_rebalance();
 		break;
 	case DMA_RESOURCE_REMOVED:
 		found = 0;
@@ -233,8 +137,6 @@ dma_channel_add_remove(struct dma_client *client,
 			ack = DMA_ACK;
 		else
 			break;
-
-		async_tx_rebalance();
 		break;
 	case DMA_RESOURCE_SUSPEND:
 	case DMA_RESOURCE_RESUME:
@@ -248,51 +150,18 @@ dma_channel_add_remove(struct dma_client *client,
 	return ack;
 }
 
-static int __init
-async_tx_init(void)
+static int __init async_tx_init(void)
 {
-	enum dma_transaction_type cap;
-
-	spin_lock_init(&async_tx_lock);
-	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
-	/* an interrupt will never be an explicit operation type.
-	 * clearing this bit prevents allocation to a slot in 'channel_table'
-	 */
-	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
-		channel_table[cap] = alloc_percpu(struct chan_ref_percpu);
-		if (!channel_table[cap])
-			goto err;
-	}
-
-	channel_table_initialized = 1;
 	dma_async_client_register(&async_tx_dma);
 	dma_async_client_chan_request(&async_tx_dma);
 
 	printk(KERN_INFO "async_tx: api initialized (async)\n");
 
 	return 0;
-err:
-	printk(KERN_ERR "async_tx: initialization failure\n");
-
-	while (--cap >= 0)
-		free_percpu(channel_table[cap]);
-
-	return 1;
 }
 
 static void __exit async_tx_exit(void)
 {
-	enum dma_transaction_type cap;
-
-	channel_table_initialized = 0;
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		if (channel_table[cap])
-			free_percpu(channel_table[cap]);
-
 	dma_async_client_unregister(&async_tx_dma);
 }
 
@@ -308,16 +177,9 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 {
 	/* see if we can keep the chain on one channel */
 	if (depend_tx &&
 	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
 		return depend_tx->chan;
-	else if (likely(channel_table_initialized)) {
-		struct dma_chan_ref *ref;
-		int cpu = get_cpu();
-		ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
-		put_cpu();
-		return ref ? ref->chan : NULL;
-	} else
-		return NULL;
+	return dma_find_channel(tx_type);
 }
 EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
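
For context, the per-CPU lookup deleted from __async_tx_find_channel() above presumably reappears in drivers/dma/dmaengine.c as dma_find_channel(). The sketch below is reconstructed from the removed else-branch and is only an approximation of the dmaengine side; table and field names there may differ.

```c
/*
 * Approximate shape of the centralized lookup, reconstructed from the
 * else-branch removed above (not quoted from dmaengine.c): the per-cpu
 * channel_table is now owned by dmaengine and repopulated by
 * dma_channel_rebalance() as channels come and go.
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	int cpu = get_cpu();
	struct dma_chan *chan;

	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;	/* NULL until a capable channel is registered */
}
```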
