author     James Morris <jmorris@namei.org>   2009-02-05 19:01:45 -0500
committer  James Morris <jmorris@namei.org>   2009-02-05 19:01:45 -0500
commit     cb5629b10d64a8006622ce3a52bc887d91057d69 (patch)
tree       7c06d8f30783115e3384721046258ce615b129c5 /crypto
parent     8920d5ad6ba74ae8ab020e90cc4d976980e68701 (diff)
parent     f01d1d546abb2f4028b5299092f529eefb01253a (diff)
Merge branch 'master' into next
Conflicts:
fs/namei.c
Manually merged per:
diff --cc fs/namei.c
index 734f2b5,bbc15c2..0000000
--- a/fs/namei.c
+++ b/fs/namei.c
@@@ -860,9 -848,8 +849,10 @@@ static int __link_path_walk(const char
nd->flags |= LOOKUP_CONTINUE;
err = exec_permission_lite(inode);
if (err == -EAGAIN)
- err = vfs_permission(nd, MAY_EXEC);
+ err = inode_permission(nd->path.dentry->d_inode,
+ MAY_EXEC);
+ if (!err)
+ err = ima_path_check(&nd->path, MAY_EXEC);
if (err)
break;
@@@ -1525,14 -1506,9 +1509,14 @@@ int may_open(struct path *path, int acc
flag &= ~O_TRUNC;
}
- error = vfs_permission(nd, acc_mode);
+ error = inode_permission(inode, acc_mode);
if (error)
return error;
+
- error = ima_path_check(&nd->path,
++ error = ima_path_check(path,
+ acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
+ if (error)
+ return error;
/*
* An append-only file must be opened in append mode for writing.
*/
Signed-off-by: James Morris <jmorris@namei.org>
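
For readability, the code that results from the resolution above, reconstructed from the combined-diff hunks (an excerpt of fs/namei.c after the merge, not a standalone unit; only the conflicted lines and their immediate context are shown):

	/* __link_path_walk(): the exec-permission fallback is switched to
	 * inode_permission() and followed by the new IMA measurement hook */
	nd->flags |= LOOKUP_CONTINUE;
	err = exec_permission_lite(inode);
	if (err == -EAGAIN)
		err = inode_permission(nd->path.dentry->d_inode,
				       MAY_EXEC);
	if (!err)
		err = ima_path_check(&nd->path, MAY_EXEC);
	if (err)
		break;

	/* may_open(): the same pairing, with ima_path_check() taking the
	 * function's struct path argument and a read/write/exec mask */
	error = inode_permission(inode, acc_mode);
	if (error)
		return error;

	error = ima_path_check(path,
			       acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
	if (error)
		return error;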
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/async_tx/async_tx.c  350
-rw-r--r--  crypto/authenc.c             24
-rw-r--r--  crypto/blkcipher.c            2
-rw-r--r--  crypto/ccm.c                  2
4 files changed, 23 insertions, 355 deletions
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index dcbf1be149f3..f21147f3626a 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -28,351 +28,18 @@
 #include <linux/async_tx.h>
 
 #ifdef CONFIG_DMA_ENGINE
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-		struct dma_chan *chan, enum dma_state state);
-
-static struct dma_client async_tx_dma = {
-	.event_callback = dma_channel_add_remove,
-	/* .cap_mask == 0 defaults to all channels */
-};
-
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * chan_ref_percpu - tracks channel allocations per core/opertion
- */
-struct chan_ref_percpu {
-	struct dma_chan_ref *ref;
-};
-
-static int channel_table_initialized;
-static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
-
-/**
- * async_tx_lock - protect modification of async_tx_master_list and serialize
- *	rebalance operations
- */
-static spinlock_t async_tx_lock;
-
-static LIST_HEAD(async_tx_master_list);
-
-/* async_tx_issue_pending_all - start all transactions on all channels */
-void async_tx_issue_pending_all(void)
-{
-	struct dma_chan_ref *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		ref->chan->device->device_issue_pending(ref->chan);
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
-
-/* dma_wait_for_async_tx - spin wait for a transcation to complete
- * @tx: transaction to wait on
- */
-enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
-
-	if (!tx)
-		return DMA_SUCCESS;
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
-}
-EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-
-/* async_tx_run_dependencies - helper routine for dma drivers to process
- *	(start) dependent operations on their target channel
- * @tx: transaction with dependencies
- */
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_async_tx_descriptor *dep = tx->next;
-	struct dma_async_tx_descriptor *dep_next;
-	struct dma_chan *chan;
-
-	if (!dep)
-		return;
-
-	chan = dep->chan;
-
-	/* keep submitting up until a channel switch is detected
-	 * in that case we will be called again as a result of
-	 * processing the interrupt from async_tx_channel_switch
-	 */
-	for (; dep; dep = dep_next) {
-		spin_lock_bh(&dep->lock);
-		dep->parent = NULL;
-		dep_next = dep->next;
-		if (dep_next && dep_next->chan == chan)
-			dep->next = NULL; /* ->next will be submitted */
-		else
-			dep_next = NULL; /* submit current dep and terminate */
-		spin_unlock_bh(&dep->lock);
-
-		dep->tx_submit(dep);
-	}
-
-	chan->device->device_issue_pending(chan);
-}
-EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
-
-static void
-free_dma_chan_ref(struct rcu_head *rcu)
-{
-	struct dma_chan_ref *ref;
-	ref = container_of(rcu, struct dma_chan_ref, rcu);
-	kfree(ref);
-}
-
-static void
-init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
-{
-	INIT_LIST_HEAD(&ref->node);
-	INIT_RCU_HEAD(&ref->rcu);
-	ref->chan = chan;
-	atomic_set(&ref->count, 0);
-}
-
-/**
- * get_chan_ref_by_cap - returns the nth channel of the given capability
- *	defaults to returning the channel with the desired capability and the
- *	lowest reference count if the index can not be satisfied
- * @cap: capability to match
- * @index: nth channel desired, passing -1 has the effect of forcing the
- *  default return value
- */
-static struct dma_chan_ref *
-get_chan_ref_by_cap(enum dma_transaction_type cap, int index)
-{
-	struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
-			if (!min_ref)
-				min_ref = ref;
-			else if (atomic_read(&ref->count) <
-				atomic_read(&min_ref->count))
-				min_ref = ref;
-
-			if (index-- == 0) {
-				ret_ref = ref;
-				break;
-			}
-		}
-	rcu_read_unlock();
-
-	if (!ret_ref)
-		ret_ref = min_ref;
-
-	if (ret_ref)
-		atomic_inc(&ret_ref->count);
-
-	return ret_ref;
-}
-
-/**
- * async_tx_rebalance - redistribute the available channels, optimize
- * for cpu isolation in the SMP case, and opertaion isolation in the
- * uniprocessor case
- */
-static void async_tx_rebalance(void)
-{
-	int cpu, cap, cpu_idx = 0;
-	unsigned long flags;
-
-	if (!channel_table_initialized)
-		return;
-
-	spin_lock_irqsave(&async_tx_lock, flags);
-
-	/* undo the last distribution */
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_possible_cpu(cpu) {
-			struct dma_chan_ref *ref =
-				per_cpu_ptr(channel_table[cap], cpu)->ref;
-			if (ref) {
-				atomic_set(&ref->count, 0);
-				per_cpu_ptr(channel_table[cap], cpu)->ref =
-									NULL;
-			}
-		}
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_online_cpu(cpu) {
-			struct dma_chan_ref *new;
-			if (NR_CPUS > 1)
-				new = get_chan_ref_by_cap(cap, cpu_idx++);
-			else
-				new = get_chan_ref_by_cap(cap, -1);
-
-			per_cpu_ptr(channel_table[cap], cpu)->ref = new;
-		}
-
-	spin_unlock_irqrestore(&async_tx_lock, flags);
-}
-
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-	struct dma_chan *chan, enum dma_state state)
-{
-	unsigned long found, flags;
-	struct dma_chan_ref *master_ref, *ref;
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		found = 0;
-		rcu_read_lock();
-		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-			if (ref->chan == chan) {
-				found = 1;
-				break;
-			}
-		rcu_read_unlock();
-
-		pr_debug("async_tx: dma resource available [%s]\n",
-			found ? "old" : "new");
-
-		if (!found)
-			ack = DMA_ACK;
-		else
-			break;
-
-		/* add the channel to the generic management list */
-		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
-		if (master_ref) {
-			/* keep a reference until async_tx is unloaded */
-			dma_chan_get(chan);
-			init_dma_chan_ref(master_ref, chan);
-			spin_lock_irqsave(&async_tx_lock, flags);
-			list_add_tail_rcu(&master_ref->node,
-				&async_tx_master_list);
-			spin_unlock_irqrestore(&async_tx_lock,
-				flags);
-		} else {
-			printk(KERN_WARNING "async_tx: unable to create"
-				" new master entry in response to"
-				" a DMA_RESOURCE_ADDED event"
-				" (-ENOMEM)\n");
-			return 0;
-		}
-
-		async_tx_rebalance();
-		break;
-	case DMA_RESOURCE_REMOVED:
-		found = 0;
-		spin_lock_irqsave(&async_tx_lock, flags);
-		list_for_each_entry(ref, &async_tx_master_list, node)
-			if (ref->chan == chan) {
-				/* permit backing devices to go away */
-				dma_chan_put(ref->chan);
-				list_del_rcu(&ref->node);
-				call_rcu(&ref->rcu, free_dma_chan_ref);
-				found = 1;
-				break;
-			}
-		spin_unlock_irqrestore(&async_tx_lock, flags);
-
-		pr_debug("async_tx: dma resource removed [%s]\n",
-			found ? "ours" : "not ours");
-
-		if (found)
-			ack = DMA_ACK;
-		else
-			break;
-
-		async_tx_rebalance();
-		break;
-	case DMA_RESOURCE_SUSPEND:
-	case DMA_RESOURCE_RESUME:
-		printk(KERN_WARNING "async_tx: does not support dma channel"
-			" suspend/resume\n");
-		break;
-	default:
-		BUG();
-	}
-
-	return ack;
-}
-
-static int __init
-async_tx_init(void)
+static int __init async_tx_init(void)
 {
-	enum dma_transaction_type cap;
-
-	spin_lock_init(&async_tx_lock);
-	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
-	/* an interrupt will never be an explicit operation type.
-	 * clearing this bit prevents allocation to a slot in 'channel_table'
-	 */
-	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
-		channel_table[cap] = alloc_percpu(struct chan_ref_percpu);
-		if (!channel_table[cap])
-			goto err;
-	}
-
-	channel_table_initialized = 1;
-	dma_async_client_register(&async_tx_dma);
-	dma_async_client_chan_request(&async_tx_dma);
+	dmaengine_get();
 
 	printk(KERN_INFO "async_tx: api initialized (async)\n");
 
 	return 0;
-err:
-	printk(KERN_ERR "async_tx: initialization failure\n");
-
-	while (--cap >= 0)
-		free_percpu(channel_table[cap]);
-
-	return 1;
 }
 
 static void __exit async_tx_exit(void)
 {
-	enum dma_transaction_type cap;
-
-	channel_table_initialized = 0;
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		if (channel_table[cap])
-			free_percpu(channel_table[cap]);
-
-	dma_async_client_unregister(&async_tx_dma);
+	dmaengine_put();
 }
 
 /**
@@ -387,16 +54,9 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 {
 	/* see if we can keep the chain on one channel */
 	if (depend_tx &&
 	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
 		return depend_tx->chan;
-	else if (likely(channel_table_initialized)) {
-		struct dma_chan_ref *ref;
-		int cpu = get_cpu();
-		ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
-		put_cpu();
-		return ref ? ref->chan : NULL;
-	} else
-		return NULL;
+	return dma_find_channel(tx_type);
 }
 EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
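
Taken together, the two hunks leave async_tx.c as a thin wrapper around the dmaengine core. A sketch of the resulting code, reassembled from the added and context lines above (the return type and second parameter of __async_tx_find_channel() are filled in from surrounding context and do not appear in the hunk header):

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
	/* pin the dmaengine core and its channels; replaces the private
	 * dma_client/channel_table bookkeeping removed above */
	dmaengine_get();

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
}

static void __exit async_tx_exit(void)
{
	/* drop the reference taken in async_tx_init() */
	dmaengine_put();
}

struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
			enum dma_transaction_type tx_type)
{
	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	/* otherwise ask the dmaengine core for a channel with this capability */
	return dma_find_channel(tx_type);
}
#endif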
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 40b6e9ec9e3a..5793b64c81a8 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -158,16 +158,19 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
 	dstp = sg_page(dst);
 	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
 
-	sg_init_table(cipher, 2);
-	sg_set_buf(cipher, iv, ivsize);
-	authenc_chain(cipher, dst, vdst == iv + ivsize);
+	if (ivsize) {
+		sg_init_table(cipher, 2);
+		sg_set_buf(cipher, iv, ivsize);
+		authenc_chain(cipher, dst, vdst == iv + ivsize);
+		dst = cipher;
+	}
 
 	cryptlen = req->cryptlen + ivsize;
-	hash = crypto_authenc_hash(req, flags, cipher, cryptlen);
+	hash = crypto_authenc_hash(req, flags, dst, cryptlen);
 	if (IS_ERR(hash))
 		return PTR_ERR(hash);
 
-	scatterwalk_map_and_copy(hash, cipher, cryptlen,
+	scatterwalk_map_and_copy(hash, dst, cryptlen,
 				 crypto_aead_authsize(authenc), 1);
 	return 0;
 }
@@ -285,11 +288,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
 	srcp = sg_page(src);
 	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
 
-	sg_init_table(cipher, 2);
-	sg_set_buf(cipher, iv, ivsize);
-	authenc_chain(cipher, src, vsrc == iv + ivsize);
+	if (ivsize) {
+		sg_init_table(cipher, 2);
+		sg_set_buf(cipher, iv, ivsize);
+		authenc_chain(cipher, src, vsrc == iv + ivsize);
+		src = cipher;
+	}
 
-	return crypto_authenc_verify(req, cipher, cryptlen + ivsize);
+	return crypto_authenc_verify(req, src, cryptlen + ivsize);
 }
 
 static int crypto_authenc_decrypt(struct aead_request *req)
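
Both hunks apply the same guard. For reference, the generation path after this change, reassembled from the first hunk above (an excerpt of crypto_authenc_genicv(), not a standalone unit):

	/* only build and chain the two-entry IV scatterlist when the cipher
	 * actually has an IV; with ivsize == 0 the hash walks the original
	 * destination list instead of a zero-length entry */
	if (ivsize) {
		sg_init_table(cipher, 2);
		sg_set_buf(cipher, iv, ivsize);
		authenc_chain(cipher, dst, vdst == iv + ivsize);
		dst = cipher;
	}

	cryptlen = req->cryptlen + ivsize;
	hash = crypto_authenc_hash(req, flags, dst, cryptlen);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	scatterwalk_map_and_copy(hash, dst, cryptlen,
				 crypto_aead_authsize(authenc), 1);
	return 0;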
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 4a7e65c4df4d..d70a41c002df 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -124,6 +124,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,
 	scatterwalk_done(&walk->in, 0, nbytes);
 	scatterwalk_done(&walk->out, 1, nbytes);
 
+err:
 	walk->total = nbytes;
 	walk->nbytes = nbytes;
 
@@ -132,7 +133,6 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,
 		return blkcipher_walk_next(desc, walk);
 	}
 
-err:
 	if (walk->iv != desc->info)
 		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
 	if (walk->buffer != walk->page)
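
The two hunks only move the err label. A sketch of the resulting flow in blkcipher_walk_done(), reassembled from the context lines above with the unchanged lines between the hunks elided: the error path now resets the walk state before falling through to the IV and buffer cleanup instead of jumping past it.

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:					/* the error path enters here now ... */
	walk->total = nbytes;		/* ... so it also resets the walk state */
	walk->nbytes = nbytes;

	/* unchanged code elided; the old "err:" label that used to sit just
	 * before the checks below has been removed */

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));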
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 7cf7e5a6b781..c36d654cf56a 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -266,6 +266,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
 	if (assoclen) {
 		pctx->ilen = format_adata(idata, assoclen);
 		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
+	} else {
+		pctx->ilen = 0;
 	}
 
 	/* compute plaintext into mac */
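
The resulting branch in crypto_ccm_auth(), reassembled from the hunk above; the rationale noted in the comment (keeping ->ilen from carrying a stale value into the MAC computation when a request has no associated data) is inferred from the change itself rather than stated on this page:

	if (assoclen) {
		pctx->ilen = format_adata(idata, assoclen);
		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
	} else {
		/* no associated data: make sure a length left over from an
		 * earlier request cannot be reused */
		pctx->ilen = 0;
	}

	/* compute plaintext into mac */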