author	Tadeusz Struk <tadeusz.struk@intel.com>	2014-07-25 18:55:26 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2014-08-01 10:36:03 -0400
commit	45cff2608007ab28047cadb33e85b58c40b447ce (patch)
tree	bdd57b4ab4e331fbe6fede001e53ab6a736d317c /drivers/crypto/qat
parent	a7d217617b8528fcf92274be03f20ff2f5ec3dc4 (diff)
crypto: qat - remove unnecessary parentheses
Resolve new strict checkpatch hits:

    CHECK:UNNECESSARY_PARENTHESES: Unnecessary parentheses around ...

Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
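For context, checkpatch's CHECK:UNNECESSARY_PARENTHESES fires on grouping that C's precedence rules already guarantee, such as parentheses around the operand of a unary operator. A minimal sketch of the shapes this patch cleans up (hypothetical names, not code from the driver):

	#include <stdio.h>

	struct table { int n; };
	struct handle { struct table str_table; };

	/* hypothetical helper, standing in for qat_uclo_get_string() */
	static struct table *lookup(struct table *t) { return t; }

	int main(void)
	{
		struct handle h = { { 42 } };

		/* Both calls are identical: '.' and '->' bind tighter than
		 * unary '&', so the inner parentheses are exactly what
		 * checkpatch flags as unnecessary. */
		printf("%d\n", lookup(&(h.str_table))->n);
		printf("%d\n", lookup(&h.str_table)->n);
		return 0;
	}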
Diffstat (limited to 'drivers/crypto/qat')
-rw-r--r--	drivers/crypto/qat/qat_common/icp_qat_hw.h	6
-rw-r--r--	drivers/crypto/qat/qat_common/qat_algs.c	2
-rw-r--r--	drivers/crypto/qat/qat_common/qat_uclo.c	54
3 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index cc7ec4071929..5031f8c10d75 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -120,14 +120,14 @@ struct icp_qat_hw_auth_config {
 #define QAT_AUTH_ALGO_SHA3_BITPOS 22
 #define QAT_AUTH_ALGO_SHA3_MASK 0x3
 #define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
-	((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
-	(((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+	(((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+	((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
 	(((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
 	QAT_AUTH_ALGO_SHA3_BITPOS) | \
 	(((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
 	(algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
 	& QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
-	(((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+	((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
 
 struct icp_qat_hw_auth_counter {
 	__be32 counter;
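The macro above builds the hardware auth config word by masking each field and shifting it to its bit position. A minimal sketch of the same mask-and-shift pattern, mirroring the patched (parenthesis-free) form; the field widths are hypothetical, since only the SHA3 BITPOS/MASK values (22, 0x3) appear in this hunk:

	#include <stdio.h>

	#define FIELD_MODE_MASK   0xf	/* hypothetical 4-bit field */
	#define FIELD_MODE_BITPOS 0
	#define FIELD_SHA3_MASK   0x3	/* cf. QAT_AUTH_ALGO_SHA3_MASK */
	#define FIELD_SHA3_BITPOS 22	/* cf. QAT_AUTH_ALGO_SHA3_BITPOS */

	#define CONFIG_BUILD(mode, sha3) \
		(((mode & FIELD_MODE_MASK) << FIELD_MODE_BITPOS) | \
		 ((sha3 & FIELD_SHA3_MASK) << FIELD_SHA3_BITPOS))

	int main(void)
	{
		unsigned int cfg = CONFIG_BUILD(0x5, 0x2);

		printf("0x%08x\n", cfg);	/* 0x00800005 = (0x2 << 22) | 0x5 */
		return 0;
	}

One caveat worth noting: without parentheses around a macro parameter, an argument such as `x | y` would expand to `x | (y & FIELD_MODE_MASK)`, so the cleanup presumably relies on callers passing simple expressions.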
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 946686f83660..59df48872955 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -759,7 +759,7 @@ void qat_alg_callback(void *resp)
 	qat_alg_free_bufl(inst, qat_req);
 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
 		res = -EBADMSG;
-	areq->base.complete(&(areq->base), res);
+	areq->base.complete(&areq->base, res);
 }
 
 static int qat_alg_dec(struct aead_request *areq)
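The change here drops parentheses that precedence already implies: postfix `->` binds tighter than unary `&`, so `&(areq->base)` and `&areq->base` name the same address. A tiny standalone check, with a stand-in struct rather than the kernel's:

	#include <assert.h>
	#include <stdio.h>

	struct crypto_async_request { int dummy; };	/* stand-in definition */
	struct aead_request { struct crypto_async_request base; };

	int main(void)
	{
		struct aead_request r, *areq = &r;

		/* '->' applies before unary '&', so both spellings denote
		 * the same lvalue address */
		assert(&(areq->base) == &areq->base);
		printf("equivalent\n");
		return 0;
	}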
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index f22b48889d33..17a9954f831e 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -79,11 +79,11 @@ static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
 	} else {
 		ae_slice->ctx_mask_assigned = 0;
 	}
-	ae_slice->regions = kzalloc(sizeof(*(ae_slice->regions)), GFP_KERNEL);
-	if (!(ae_slice->regions))
+	ae_slice->regions = kzalloc(sizeof(*ae_slice->regions), GFP_KERNEL);
+	if (!ae_slice->regions)
 		return -ENOMEM;
-	ae_slice->page = kzalloc(sizeof(*(ae_slice->page)), GFP_KERNEL);
-	if (!(ae_slice->page))
+	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+	if (!ae_slice->page)
 		goto out_err;
 	page = ae_slice->page;
 	page->encap_page = encap_image->page;
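The `sizeof(*ptr)` idiom kept here sizes the allocation from the pointee type, so it stays correct if that type ever changes; the patch only removes the redundant grouping in `sizeof(*(ptr))`. A minimal userspace analogue with calloc in place of kzalloc (hypothetical struct):

	#include <stdlib.h>

	struct region { int id; };	/* hypothetical pointee type */

	int main(void)
	{
		struct region *regions;

		/* sizeof(*regions) follows the pointee; sizeof(*(regions))
		 * allocated exactly the same size, with extra parentheses */
		regions = calloc(1, sizeof(*regions));
		if (!regions)
			return 1;
		free(regions);
		return 0;
	}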
@@ -248,7 +248,7 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
 		pr_err("QAT: Memory scope for init_mem error\n");
 		return -EINVAL;
 	}
-	str = qat_uclo_get_string(&(obj_handle->str_table), init_mem->sym_name);
+	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
 	if (!str) {
 		pr_err("QAT: AE name assigned in uof init table is NULL\n");
 		return -EINVAL;
@@ -257,7 +257,7 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
 		pr_err("QAT: Parse num for AE number failed\n");
 		return -EINVAL;
 	}
-	if (!test_bit(*ae, (unsigned long *)&(handle->hal_handle->ae_mask))) {
+	if (!test_bit(*ae, (unsigned long *)&handle->hal_handle->ae_mask)) {
 		pr_err("QAT: ae %d to be init is fused off\n", *ae);
 		return -EINVAL;
 	}
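The cast here exists because the kernel's test_bit() walks a bitmap of unsigned longs, while ae_mask is a narrower field; the patch only drops the parentheses around the `&` operand. A simplified userspace sketch of the same present/fused-off check (my_test_bit is a stand-in, not the kernel helper):

	#include <stdbool.h>
	#include <stdio.h>

	/* simplified stand-in for the kernel's test_bit() */
	static bool my_test_bit(unsigned int nr, const unsigned long *addr)
	{
		return (addr[nr / (8 * sizeof(long))] >>
			(nr % (8 * sizeof(long)))) & 1;
	}

	int main(void)
	{
		/* hypothetical mask: engines 0 and 2 present, engine 1 fused off */
		unsigned long ae_mask = 0x5;

		printf("ae 1 present: %d\n", my_test_bit(1, &ae_mask));	/* 0 */
		printf("ae 2 present: %d\n", my_test_bit(2, &ae_mask));	/* 1 */
		return 0;
	}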
@@ -332,7 +332,7 @@ static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
 				       ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
 		return -EINVAL;
 	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
-					    &(obj_handle->lm_init_tab[ae])))
+					    &obj_handle->lm_init_tab[ae]))
 		return -EINVAL;
 	return 0;
 }
@@ -347,7 +347,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
 	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
 		return -EINVAL;
 	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
-					    &(obj_handle->umem_init_tab[ae])))
+					    &obj_handle->umem_init_tab[ae]))
 		return -EINVAL;
 	/* set the highest ustore address referenced */
 	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
@@ -425,7 +425,7 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
 	page = image->page;
 
 	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
-		if (!test_bit(ae, (unsigned long *)&(uof_image->ae_assigned)))
+		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
 			continue;
 		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
 		patt_pos = page->beg_addr_p + page->micro_words_num;
@@ -486,8 +486,8 @@ static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
 
 	for (i = 0; i < obj_hdr->num_chunks; i++) {
 		if ((cur < (void *)&chunk_hdr[i]) &&
-		    !(strncmp(chunk_hdr[i].chunk_id, chunk_id,
-			      ICP_QAT_UOF_OBJID_LEN))) {
+		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+			     ICP_QAT_UOF_OBJID_LEN)) {
 			return &chunk_hdr[i];
 		}
 	}
@@ -532,8 +532,8 @@ qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
 	file_chunk = (struct icp_qat_uof_filechunkhdr *)
 		(buf + sizeof(struct icp_qat_uof_filehdr));
 	for (i = 0; i < file_hdr->num_chunks; i++) {
-		if (!(strncmp(file_chunk->chunk_id, chunk_id,
-			      ICP_QAT_UOF_OBJID_LEN))) {
+		if (!strncmp(file_chunk->chunk_id, chunk_id,
+			     ICP_QAT_UOF_OBJID_LEN)) {
 			chunk = buf + file_chunk->offset;
 			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
 				(char *)chunk, file_chunk->size))
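Both strncmp hunks use the common "first n bytes equal" idiom: strncmp() returns 0 on a match, so `!strncmp(...)` reads as equality and the function call needs no extra parentheses before `!` applies. A minimal sketch (the chunk id is illustrative, not a claim about the real UOF layout):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char chunk_id[9] = "UOF_STRT";	/* illustrative id */

		/* strncmp() == 0 means the first 8 bytes match */
		if (!strncmp(chunk_id, "UOF_STRT", 8))
			printf("chunk matched\n");
		return 0;
	}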
@@ -692,12 +692,12 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 
 	for (ae = 0; ae <= max_ae; ae++) {
-		if (!test_bit(ae, (unsigned long *)
-			      &(handle->hal_handle->ae_mask)))
+		if (!test_bit(ae,
+			      (unsigned long *)&handle->hal_handle->ae_mask))
 			continue;
 		for (i = 0; i < obj_handle->uimage_num; i++) {
 			if (!test_bit(ae, (unsigned long *)
-				      &(obj_handle->ae_uimage[i].img_ptr->ae_assigned)))
+				      &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
 				continue;
 			mflag = 1;
 			if (qat_uclo_init_ae_data(obj_handle, ae, i))
@@ -898,12 +898,12 @@ static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
 
 	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
 		if (!test_bit(ae,
-			      (unsigned long *)&(handle->hal_handle->ae_mask)))
+			      (unsigned long *)&handle->hal_handle->ae_mask))
 			continue;
-		ae_data = &(obj_handle->ae_data[ae]);
+		ae_data = &obj_handle->ae_data[ae];
 		for (s = 0; s < ae_data->slice_num && s < ICP_QAT_UCLO_MAX_CTX;
 		     s++) {
-			if (!(obj_handle->ae_data[ae].ae_slices[s].encap_image))
+			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
 				continue;
 			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
 			if (qat_hal_set_ae_ctx_mode(handle, ae,
@@ -968,9 +968,9 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
 		return -EINVAL;
 	}
 	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
-	if (!(obj_handle->obj_hdr->file_buff) ||
-	    !(qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
-				     &(obj_handle->str_table)))) {
+	if (!obj_handle->obj_hdr->file_buff ||
+	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+				    &obj_handle->str_table)) {
 		pr_err("QAT: uof doesn't have effective images\n");
 		goto out_err;
 	}
@@ -984,8 +984,8 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
 		goto out_check_uof_aemask_err;
 	}
 	qat_uclo_init_uword_num(handle);
-	qat_uclo_map_initmem_table(&(obj_handle->encap_uof_obj),
-				   &(obj_handle->init_mem_tab));
+	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+				   &obj_handle->init_mem_tab);
 	if (qat_uclo_set_ae_mode(handle))
 		goto out_check_uof_aemask_err;
 	return 0;
@@ -1143,7 +1143,7 @@ static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle,
 	/* load the default page and set assigned CTX PC
 	 * to the entrypoint address */
 	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
-		if (!test_bit(ae, (unsigned long *)&(image->ae_assigned)))
+		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
 			continue;
 		/* find the slice to which this image is assigned */
 		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
@@ -1177,9 +1177,9 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
 	if (qat_uclo_init_globals(handle))
 		return -EINVAL;
 	for (i = 0; i < obj_handle->uimage_num; i++) {
-		if (!(obj_handle->ae_uimage[i].img_ptr))
+		if (!obj_handle->ae_uimage[i].img_ptr)
 			return -EINVAL;
-		if (qat_uclo_init_ustore(handle, &(obj_handle->ae_uimage[i])))
+		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
 			return -EINVAL;
 		qat_uclo_wr_uimage_pages(handle,
 					 obj_handle->ae_uimage[i].img_ptr);