author     Tadeusz Struk <tadeusz.struk@intel.com>    2014-07-25 18:55:46 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>   2014-08-01 10:36:06 -0400
commit     9a147cb3232fd8dbd44ed4628c6c0d05033d4c61 (patch)
tree       56b4b76ad144e0960b04d07402256afbf78d9819 /drivers/crypto
parent     8c1f8e3bbf60d0d06190be81f55d5199d52a463f (diff)
crypto: qat - change ae_num to ae_id
Change the logic of how acceleration engines are indexed to make it
easier to read. Also update some return code values to better reflect
what failed.
Signed-off-by: Pingchao Yang <pingchao.yang@intel.com>
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c  | 26
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 67
2 files changed, 45 insertions, 48 deletions
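The key change is the meaning of ae_max_num: it now stores the number of acceleration engines (the highest enabled AE id plus one) rather than the highest AE index, so every per-engine loop in the diff below iterates with < instead of <= and no longer runs one slot past the last engine. A minimal stand-alone sketch of that convention, with made-up example values (ae_max_num = 8, ae_mask = 0xB5) and a printf standing in for the real per-AE CSR writes:

#include <stdio.h>

/* Model of the new indexing convention: ae_max_num is a count
 * (max enabled AE id + 1), ae_mask marks which ids are present. */
struct hal_sketch {
        unsigned int ae_max_num;
        unsigned int ae_mask;
};

int main(void)
{
        struct hal_sketch hal = { .ae_max_num = 8, .ae_mask = 0xB5 };
        unsigned int ae;

        for (ae = 0; ae < hal.ae_max_num; ae++) {       /* '<', not '<=' */
                if (!(hal.ae_mask & (1u << ae)))
                        continue;                       /* AE not enabled */
                printf("configure AE %u\n", ae);
        }
        return 0;
}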
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 28da876ee268..da9626b6b6b4 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -424,7 +424,7 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
         SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
                     (~MC_TIMESTAMP_ENABLE));
 
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                 if (!(handle->hal_handle->ae_mask & (1 << ae)))
                         continue;
                 qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
@@ -492,7 +492,7 @@ int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
                 goto out_err;
 
         /* Set undefined power-up/reset states to reasonable default values */
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                 if (!(handle->hal_handle->ae_mask & (1 << ae)))
                         continue;
                 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
@@ -608,7 +608,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
         unsigned int savctx = 0;
         int ret = 0;
 
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                 if (!(handle->hal_handle->ae_mask & (1 << ae)))
                         continue;
                 for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
@@ -637,7 +637,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
                 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
                 qat_hal_enable_ctx(handle, ae, ctx_mask);
         }
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                 if (!(handle->hal_handle->ae_mask & (1 << ae)))
                         continue;
                 /* wait for AE to finish */
@@ -674,17 +674,16 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
 #define ICP_DH895XCC_PMISC_BAR 1
 int qat_hal_init(struct adf_accel_dev *accel_dev)
 {
-        unsigned char ae = 0;
-        unsigned int csr_val = 0;
-        unsigned int max_en_ae_num = 0;
-        struct icp_qat_fw_loader_handle *handle = NULL;
+        unsigned char ae;
+        unsigned int max_en_ae_id = 0;
+        struct icp_qat_fw_loader_handle *handle;
         struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
         struct adf_hw_device_data *hw_data = accel_dev->hw_device;
         struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR];
 
         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
         if (!handle)
-                goto out_handle;
+                return -ENOMEM;
 
         handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
                 ICP_DH895XCC_CAP_OFFSET;
@@ -713,9 +712,9 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
                         handle->hal_handle->max_ustore;
                 handle->hal_handle->aes[ae].live_ctx_mask =
                         ICP_QAT_UCLO_AE_ALL_CTX;
-                max_en_ae_num = ae;
+                max_en_ae_id = ae;
         }
-        handle->hal_handle->ae_max_num = max_en_ae_num;
+        handle->hal_handle->ae_max_num = max_en_ae_id + 1;
         /* take all AEs out of reset */
         if (qat_hal_clr_reset(handle)) {
                 pr_err("QAT: qat_hal_clr_reset error\n");
@@ -724,7 +723,9 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
         if (qat_hal_clear_gpr(handle))
                 goto out_err;
         /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+                unsigned int csr_val = 0;
+
                 if (!(hw_data->ae_mask & (1 << ae)))
                         continue;
                 qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
@@ -738,7 +739,6 @@ out_err:
         kfree(handle->hal_handle);
 out_hal_handle:
         kfree(handle);
-out_handle:
         return -EFAULT;
 }
 
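Besides the loop bounds, qat_hal_init() above now returns -ENOMEM directly when the handle allocation fails (nothing needs unwinding at that point, so the out_handle label is dropped), while later hardware-setup failures still unwind through the remaining labels and return -EFAULT; csr_val also becomes local to the one loop that uses it. A hedged user-space sketch of that error-path shape, with made-up stand-ins (hal_sketch_handle, hw_setup_sketch) for the driver types and calls:

#include <errno.h>
#include <stdlib.h>

struct hal_sketch_handle { int dummy; };

static int hw_setup_sketch(struct hal_sketch_handle *h)
{
        (void)h;
        return 0;       /* pretend the hardware came up */
}

static int hal_init_sketch(struct hal_sketch_handle **out)
{
        struct hal_sketch_handle *handle;

        handle = calloc(1, sizeof(*handle));
        if (!handle)
                return -ENOMEM;         /* allocation failure: report it as such */

        if (hw_setup_sketch(handle))
                goto out_err;           /* later failures keep returning -EFAULT */

        *out = handle;                  /* caller owns the handle on success */
        return 0;
out_err:
        free(handle);
        return -EFAULT;
}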
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 557fa606da89..ebd5da03dc71 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -214,11 +214,10 @@ qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
 
 static int qat_uclo_parse_num(char *str, unsigned int *num)
 {
-        char buf[16];
+        char buf[16] = {0};
         unsigned long ae = 0;
         int i;
 
-        memset(buf, '\0', 16);
         strncpy(buf, str, 15);
         for (i = 0; i < 16; i++) {
                 if (!isdigit(buf[i])) {
@@ -418,13 +417,13 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
         fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
                             GFP_KERNEL);
         if (!fill_data)
-                return -EFAULT;
+                return -ENOMEM;
         for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
                 memcpy(&fill_data[i], &uof_image->fill_pattern,
                        sizeof(uint64_t));
         page = image->page;
 
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                 if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
                         continue;
                 ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
@@ -442,11 +441,9 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
 
 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
 {
-        unsigned int i;
-        int status = 0;
+        int i, ae;
         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
         struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
-        int ae;
 
         for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
                 if (initmem->num_in_bytes) {
@@ -473,7 +470,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
                                               &obj_handle->
                                               umem_init_tab[ae]);
         }
-        return status;
+        return 0;
 }
 
 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
@@ -526,7 +523,7 @@ qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
 {
         struct icp_qat_uof_filechunkhdr *file_chunk;
         struct icp_qat_uclo_objhdr *obj_hdr;
-        void *chunk;
+        char *chunk;
         int i;
 
         file_chunk = (struct icp_qat_uof_filechunkhdr *)
@@ -536,7 +533,7 @@ qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
                             ICP_QAT_UOF_OBJID_LEN)) {
                         chunk = buf + file_chunk->offset;
                         if (file_chunk->checksum != qat_uclo_calc_str_checksum(
-                                (char *)chunk, file_chunk->size))
+                                chunk, file_chunk->size))
                                 break;
                         obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
                         if (!obj_hdr)
@@ -595,7 +592,7 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
         return 0;
 }
 
-static void qat_uclo_map_image_pages(struct icp_qat_uof_encap_obj
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
                                      *encap_uof_obj,
                                      struct icp_qat_uof_image *img,
                                      struct icp_qat_uclo_encap_page *page)
@@ -631,7 +628,7 @@ static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
                                struct icp_qat_uclo_encapme *ae_uimage,
                                int max_image)
 {
-        int a = 0, i;
+        int i, j;
         struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
         struct icp_qat_uof_image *image;
         struct icp_qat_uof_objtable *ae_regtab;
@@ -640,7 +637,7 @@ static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
         struct icp_qat_uof_encap_obj *encap_uof_obj =
                 &obj_handle->encap_uof_obj;
 
-        for (a = 0; a < max_image; a++) {
+        for (j = 0; j < max_image; j++) {
                 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
                                                 ICP_QAT_UOF_IMAG, chunk_hdr);
                 if (!chunk_hdr)
@@ -650,37 +647,37 @@ static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
                 ae_regtab = (struct icp_qat_uof_objtable *)
                            (image->reg_tab_offset +
                            obj_handle->obj_hdr->file_buff);
-                ae_uimage[a].ae_reg_num = ae_regtab->entry_num;
-                ae_uimage[a].ae_reg = (struct icp_qat_uof_ae_reg *)
+                ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+                ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
                         (((char *)ae_regtab) +
                         sizeof(struct icp_qat_uof_objtable));
                 init_reg_sym_tab = (struct icp_qat_uof_objtable *)
                                    (image->init_reg_sym_tab +
                                    obj_handle->obj_hdr->file_buff);
-                ae_uimage[a].init_regsym_num = init_reg_sym_tab->entry_num;
-                ae_uimage[a].init_regsym = (struct icp_qat_uof_init_regsym *)
+                ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+                ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
                         (((char *)init_reg_sym_tab) +
                         sizeof(struct icp_qat_uof_objtable));
                 sbreak_tab = (struct icp_qat_uof_objtable *)
                         (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
-                ae_uimage[a].sbreak_num = sbreak_tab->entry_num;
-                ae_uimage[a].sbreak = (struct icp_qat_uof_sbreak *)
+                ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+                ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
                         (((char *)sbreak_tab) +
                         sizeof(struct icp_qat_uof_objtable));
-                ae_uimage[a].img_ptr = image;
+                ae_uimage[j].img_ptr = image;
                 if (qat_uclo_check_image_compat(encap_uof_obj, image))
                         goto out_err;
-                ae_uimage[a].page =
+                ae_uimage[j].page =
                         kzalloc(sizeof(struct icp_qat_uclo_encap_page),
                                 GFP_KERNEL);
-                if (!ae_uimage[a].page)
+                if (!ae_uimage[j].page)
                         goto out_err;
-                qat_uclo_map_image_pages(encap_uof_obj, image,
-                                         ae_uimage[a].page);
+                qat_uclo_map_image_page(encap_uof_obj, image,
+                                        ae_uimage[j].page);
         }
-        return a;
+        return j;
 out_err:
-        for (i = 0; i < a; i++)
+        for (i = 0; i < j; i++)
                 kfree(ae_uimage[i].page);
         return 0;
 }
@@ -875,7 +872,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
                         return -EINVAL;
                 }
         }
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
                         if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
                                 continue;
@@ -896,7 +893,7 @@ static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
         struct icp_qat_uclo_aedata *ae_data;
         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                 if (!test_bit(ae,
                               (unsigned long *)&handle->hal_handle->ae_mask))
                         continue;
@@ -1041,7 +1038,7 @@ out_objbuf_err:
 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
 {
         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-        int a;
+        unsigned int a;
 
         if (!obj_handle)
                 return;
@@ -1050,7 +1047,7 @@ void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
         for (a = 0; a < obj_handle->uimage_num; a++)
                 kfree(obj_handle->ae_uimage[a].page);
 
-        for (a = 0; a <= (int)handle->hal_handle->ae_max_num; a++)
+        for (a = 0; a < handle->hal_handle->ae_max_num; a++)
                 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
 
         kfree(obj_handle->obj_hdr);
@@ -1127,8 +1124,8 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
         }
 }
 
-static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle,
-                                     struct icp_qat_uof_image *image)
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+                                    struct icp_qat_uof_image *image)
 {
         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
         unsigned int ctx_mask, s;
@@ -1142,7 +1139,7 @@ static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle,
                 ctx_mask = 0x55;
         /* load the default page and set assigned CTX PC
          * to the entrypoint address */
-        for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                 if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
                         continue;
                 /* find the slice to which this image is assigned */
@@ -1181,8 +1178,8 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
                         return -EINVAL;
                 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
                         return -EINVAL;
-                qat_uclo_wr_uimage_pages(handle,
-                                         obj_handle->ae_uimage[i].img_ptr);
+                qat_uclo_wr_uimage_page(handle,
+                                        obj_handle->ae_uimage[i].img_ptr);
         }
         return 0;
 }
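One more detail from the qat_uclo.c side: qat_uclo_parse_num() now zero-initializes its buffer at declaration instead of calling memset(), and strncpy() copies at most 15 bytes, so the string always stays NUL-terminated. A small stand-alone sketch of the same pattern (parse_num_sketch is a made-up name, and it uses strtoul where the driver would use kernel helpers):

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

/* Zero-init + bounded copy: the last byte of buf is never written
 * by strncpy(), so buf is always a valid NUL-terminated string. */
static int parse_num_sketch(const char *str, unsigned int *num)
{
        char buf[16] = {0};
        size_t i;

        strncpy(buf, str, sizeof(buf) - 1);
        for (i = 0; i < sizeof(buf); i++) {
                if (!isdigit((unsigned char)buf[i])) {
                        buf[i] = '\0';  /* stop at the first non-digit */
                        break;
                }
        }
        *num = (unsigned int)strtoul(buf, NULL, 10);
        return 0;
}

For example, calling parse_num_sketch("12ab", &n) in this sketch would leave n == 12.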