author     Herbert Xu <herbert@gondor.apana.org.au>    2018-10-17 02:14:06 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>    2018-10-17 02:14:06 -0400
commit     58c8a863550b19ab3559dd5f5e27cc698c03e53b (patch)
tree       a9df76b4b06e65edc07bd2f7add99451bf929d69
parent     ce4e45842de3eb54b8dd6e081765d741f5b92b56 (diff)
parent     ba439a6cbfa2936a6713f64cb499de7943673fe3 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Merge crypto tree to pick up chelsio bug fix.
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c           | 32
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h         |  2
-rw-r--r--  drivers/crypto/mxs-dcp.c                     | 53
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c       |  6
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c     |  6
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c        |  6
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c      |  6
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c    |  6
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c  |  6
9 files changed, 72 insertions, 51 deletions
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index dfc3a10bb55b..d5e4816b2d3e 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+				 int pci_chan_id)
 {
 	struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 	phys_cpl->rss_hdr_int.qid = htons(qid);
 	phys_cpl->rss_hdr_int.hash_val = 0;
+	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
 				FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 						!!lcb, ctx->tx_qidx);
 
-	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
 						       qid);
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 				((sizeof(chcr_req->wreq)) >> 4)));
@@ -1340,16 +1342,23 @@ static int chcr_device_init(struct chcr_context *ctx)
 				    adap->vres.ncrypto_fc);
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
-		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-		rxq_idx += id % rxq_perchan;
-		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-		txq_idx += id % txq_perchan;
 		spin_lock(&ctx->dev->lock_chcr_dev);
-		ctx->rx_qidx = rxq_idx;
-		ctx->tx_qidx = txq_idx;
+		ctx->tx_chan_id = ctx->dev->tx_channel_id;
 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		ctx->dev->rx_channel_id = 0;
 		spin_unlock(&ctx->dev->lock_chcr_dev);
+		rxq_idx = ctx->tx_chan_id * rxq_perchan;
+		rxq_idx += id % rxq_perchan;
+		txq_idx = ctx->tx_chan_id * txq_perchan;
+		txq_idx += id % txq_perchan;
+		ctx->rx_qidx = rxq_idx;
+		ctx->tx_qidx = txq_idx;
+		/* Channel Id used by SGE to forward packet to Host.
+		 * Same value should be used in cpl_fw6_pld RSS_CH field
+		 * by FW. Driver programs PCI channel ID to be used in fw
+		 * at the time of queue allocation with value "pi->tx_chan"
+		 */
+		ctx->pci_chan_id = txq_idx / txq_perchan;
 	}
 out:
 	return err;
@@ -2504,6 +2513,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct dsgl_walk dsgl_walk;
 	unsigned int authsize = crypto_aead_authsize(tfm);
+	struct chcr_context *ctx = a_ctx(tfm);
 	u32 temp;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2513,7 +2523,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
 	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2545,6 +2555,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 			     unsigned short qid)
 {
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_context *ctx = c_ctx(tfm);
 	struct dsgl_walk dsgl_walk;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2553,7 +2565,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 	reqctx->dstsg = dsgl_walk.last_sg;
 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index e26b72cfe4b6..d37ef41f9ebe 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -255,6 +255,8 @@ struct chcr_context {
 	struct chcr_dev *dev;
 	unsigned char tx_qidx;
 	unsigned char rx_qidx;
+	unsigned char tx_chan_id;
+	unsigned char pci_chan_id;
 	struct __crypto_ctx crypto_ctx[0];
 };
 
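The chelsio hunks above change where the transmit channel comes from: instead of reading ctx->dev->tx_channel_id at request time (a field the driver keeps toggling for round-robin assignment), chcr_device_init() now snapshots it into the per-context tx_chan_id under the device lock, derives pci_chan_id from the queue index, and dsgl_walk_end() stamps that channel into the CPL_RX_PHYS_DSGL header. Below is a minimal userspace-style sketch of the same pattern, assign once under a lock and use the cached copy afterwards; the structures and names (device_state, request_ctx, ctx_init) are illustrative, not the driver's.

/* Illustrative sketch (not driver code): snapshot a round-robin channel
 * assignment once per context instead of re-reading a shared, mutating
 * device field on every request. */
#include <pthread.h>
#include <stdio.h>

struct device_state {
	pthread_mutex_t lock;
	unsigned char next_chan;	/* toggled 0/1 for round-robin */
	unsigned int txq_perchan;	/* queues per channel */
};

struct request_ctx {
	unsigned char tx_chan_id;	/* fixed for the context lifetime */
	unsigned char pci_chan_id;	/* channel the queues belong to */
	unsigned int tx_qidx;
};

static void ctx_init(struct request_ctx *ctx, struct device_state *dev,
		     unsigned int id)
{
	unsigned int txq_idx;

	pthread_mutex_lock(&dev->lock);
	ctx->tx_chan_id = dev->next_chan;	/* cache the assignment ... */
	dev->next_chan = !dev->next_chan;	/* ... then advance round-robin */
	pthread_mutex_unlock(&dev->lock);

	txq_idx = ctx->tx_chan_id * dev->txq_perchan + id % dev->txq_perchan;
	ctx->tx_qidx = txq_idx;
	/* Later requests read ctx->pci_chan_id, which can no longer drift
	 * away from the channel the queues were picked from. */
	ctx->pci_chan_id = txq_idx / dev->txq_perchan;
}

int main(void)
{
	struct device_state dev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.txq_perchan = 4,
	};
	struct request_ctx a, b;

	ctx_init(&a, &dev, 0);
	ctx_init(&b, &dev, 1);
	printf("ctx a: chan %d qidx %u\n", a.tx_chan_id, a.tx_qidx);
	printf("ctx b: chan %d qidx %u\n", b.tx_chan_id, b.tx_qidx);
	return 0;
}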
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 54f200eaa3cc..4e6ff32f8a7e 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -79,7 +79,7 @@ struct dcp {
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
@@ -399,13 +399,20 @@ static int dcp_chan_thread_aes(void *data)
 
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -413,11 +420,8 @@ static int dcp_chan_thread_aes(void *data)
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
-			continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -459,9 +463,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
@@ -700,13 +704,20 @@ static int dcp_chan_thread_sha(void *data)
 	struct ahash_request *req;
 	int ret, fini;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -718,12 +729,8 @@ static int dcp_chan_thread_sha(void *data)
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			arq->complete(arq, ret);
-			if (!fini)
-				continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -781,9 +788,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
@@ -1077,7 +1084,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
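The mxs-dcp hunks above swap the per-channel mutex for a spinlock and rework both channel threads so that schedule() is only called after the thread has marked itself TASK_INTERRUPTIBLE and then found the queue empty; when work is pending it switches back to TASK_RUNNING and processes it, so a wake_up_process() issued by the enqueue path can no longer be lost. The following sketch condenses the resulting loop shape; struct chan and process_one() are placeholders for the driver's per-channel state and its backlog/request handling, not real APIs.

/* Sketch of the worker-loop shape after the fix. struct chan and
 * process_one() are stand-ins, not part of the driver or the kernel API. */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>

struct chan {
	spinlock_t lock;		/* protects queue; enqueuers must not sleep */
	struct crypto_queue queue;
};

static void process_one(struct crypto_async_request *backlog,
			struct crypto_async_request *arq)
{
	/* placeholder for backlog->complete(..., -EINPROGRESS) and the
	 * actual crypto operation on arq */
}

static int chan_thread(void *data)
{
	struct chan *ch = data;
	struct crypto_async_request *backlog, *arq;

	while (!kthread_should_stop()) {
		/* Go "about to sleep" before checking for work, so a producer's
		 * wake_up_process() issued after its enqueue is never lost. */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&ch->lock);
		backlog = crypto_get_backlog(&ch->queue);
		arq = crypto_dequeue_request(&ch->queue);
		spin_unlock(&ch->lock);

		if (!backlog && !arq) {
			schedule();	/* queue empty: actually sleep */
			continue;
		}

		set_current_state(TASK_RUNNING);
		process_one(backlog, arq);
	}

	return 0;
}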
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index ba197f34c252..763c2166ee0e 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
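The qat change, made here and repeated in the remaining adf_drv.c diffs below, is a type fix: for_each_set_bit() walks a bitmap of unsigned long words, so passing the address of an int through a cast makes it read past the 4-byte object (into stale stack bytes) on 64-bit builds. Declaring bar_mask as unsigned long and dropping the cast keeps the access in bounds. A minimal sketch of the corrected idiom follows, assuming only a struct pci_dev *pdev is in scope; map_mem_bars() is a hypothetical helper, not a QAT function.

/* Sketch of the corrected idiom: hand for_each_set_bit() a genuine
 * unsigned long, never a casted pointer to a narrower integer. */
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/printk.h>

static void map_mem_bars(struct pci_dev *pdev)
{
	unsigned long bar_mask;		/* was: int bar_mask; */
	unsigned int bar_nr;

	/* pci_select_bars() returns an int bitmask; widening it into an
	 * unsigned long is fine, casting &int to unsigned long * is not. */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);

	for_each_set_bit(bar_nr, &bar_mask, BITS_PER_LONG) {
		resource_size_t start = pci_resource_start(pdev, bar_nr);

		pr_info("memory BAR %u starts at %pa\n", bar_nr, &start);
	}
}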
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 24ec908eb26c..613c7d5644ce 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 59a5a0df50b6..9cb832963357 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index b9f3e0e4fde9..278452b8ef81 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index be5c5a988ca5..3a9708ef4ce2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 26ab17bfc6da..3da0f951cb59 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);