author     Harsh Jain <harsh@chelsio.com>            2018-03-19 09:36:22 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>  2018-03-30 13:32:50 -0400
commit     6f76672bd65039d68197be12653473cb4529741f
tree       47c9026d0fb04aab2efde159c7f8c6c69dce881c
parent     3ad618d8e16e844b6f562ae6c9b0984fca30409e
crypto: chelsio - Remove declaration of static function from header
It fixes a compilation warning introduced by the commit below:
chcr_crypto.h declared the static function chcr_ahash_continue(), whose
definition lives only in chcr_algo.c. Remove the declaration from the
header and move the definition in chcr_algo.c ahead of its caller,
chcr_handle_ahash_resp(), so no forward declaration is needed.
Fixes: 5110e65536f3 ("crypto: chelsio - Split Hash requests for...")
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
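---

The warning class at issue: a static function declared in a header is redeclared, with internal linkage, in every C file that includes that header, and any such file that does not also define the function gets a gcc -Wunused-function diagnostic of the form "'foo' declared 'static' but never defined". A minimal sketch with made-up file and function names (not the driver's own):

```c
/* shared.h -- hypothetical header, analogous in role to chcr_crypto.h */
#ifndef SHARED_H
#define SHARED_H
static int helper(int x);   /* static declaration seen by every includer */
#endif

/* a.c -- defines and uses helper(): compiles cleanly */
#include "shared.h"
static int helper(int x) { return x + 1; }
int a_entry(void) { return helper(1); }

/* b.c -- includes the header but never defines helper():
 * gcc -Wall warns for this translation unit:
 *   "'helper' declared 'static' but never defined [-Wunused-function]"
 */
#include "shared.h"
int b_entry(void) { return 2; }
```

Dropping the declaration from the header and ordering the definitions in chcr_algo.c so that chcr_ahash_continue() appears before its caller, chcr_handle_ahash_resp(), removes the warning without changing behaviour.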
 drivers/crypto/chelsio/chcr_algo.c   | 291
 drivers/crypto/chelsio/chcr_crypto.h |   1
 2 files changed, 145 insertions(+), 147 deletions(-)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4617c7acf4da..752ed9b25284 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -165,60 +165,6 @@ static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 	return nents;
 }
 
-static inline void chcr_handle_ahash_resp(struct ahash_request *req,
-					  unsigned char *input,
-					  int err)
-{
-	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
-	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
-	int digestsize, updated_digestsize;
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
-
-	if (input == NULL)
-		goto out;
-	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
-	updated_digestsize = digestsize;
-	if (digestsize == SHA224_DIGEST_SIZE)
-		updated_digestsize = SHA256_DIGEST_SIZE;
-	else if (digestsize == SHA384_DIGEST_SIZE)
-		updated_digestsize = SHA512_DIGEST_SIZE;
-
-	if (hctx_wr->dma_addr) {
-		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
-				 hctx_wr->dma_len, DMA_TO_DEVICE);
-		hctx_wr->dma_addr = 0;
-	}
-	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
-	    req->nbytes)) {
-		if (hctx_wr->result == 1) {
-			hctx_wr->result = 0;
-			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
-			       digestsize);
-		} else {
-			memcpy(reqctx->partial_hash,
-			       input + sizeof(struct cpl_fw6_pld),
-			       updated_digestsize);
-
-		}
-		goto unmap;
-	}
-	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
-	       updated_digestsize);
-
-	err = chcr_ahash_continue(req);
-	if (err)
-		goto unmap;
-	return;
-unmap:
-	if (hctx_wr->is_sg_map)
-		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
-
-
-out:
-	req->base.complete(&req->base, err);
-}
-
 static inline int get_aead_subtype(struct crypto_aead *aead)
 {
 	struct aead_alg *alg = crypto_aead_alg(aead);
@@ -271,34 +217,6 @@ static inline void chcr_handle_aead_resp(struct aead_request *req,
 	req->base.complete(&req->base, err);
 }
 
-/*
- *	chcr_handle_resp - Unmap the DMA buffers associated with the request
- *	@req: crypto request
- */
-int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
-		     int err)
-{
-	struct crypto_tfm *tfm = req->tfm;
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
-	struct adapter *adap = padap(ctx->dev);
-
-	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_AEAD:
-		chcr_handle_aead_resp(aead_request_cast(req), input, err);
-		break;
-
-	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
-					      input, err);
-		break;
-
-	case CRYPTO_ALG_TYPE_AHASH:
-		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
-	}
-	atomic_inc(&adap->chcr_stats.complete);
-	return err;
-}
-
 static void get_aes_decrypt_key(unsigned char *dec_key,
 				const unsigned char *key,
 				unsigned int keylength)
@@ -1784,70 +1702,6 @@ static int chcr_ahash_final(struct ahash_request *req)
 	return -EINPROGRESS;
 }
 
-static int chcr_ahash_continue(struct ahash_request *req)
-{
-	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
-	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
-	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
-	struct uld_ctx *u_ctx = NULL;
-	struct sk_buff *skb;
-	struct hash_wr_param params;
-	u8 bs;
-	int error;
-
-	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-	u_ctx = ULD_CTX(h_ctx(rtfm));
-	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    h_ctx(rtfm)->tx_qidx))) {
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -EBUSY;
-	}
-	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
-	params.kctx_len = roundup(params.alg_prm.result_size, 16);
-	if (is_hmac(crypto_ahash_tfm(rtfm))) {
-		params.kctx_len *= 2;
-		params.opad_needed = 1;
-	} else {
-		params.opad_needed = 0;
-	}
-	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
-					    HASH_SPACE_LEFT(params.kctx_len),
-					    hctx_wr->src_ofst);
-	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
-		params.sg_len = req->nbytes - hctx_wr->processed;
-	if (!hctx_wr->result ||
-	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
-		if (is_hmac(crypto_ahash_tfm(rtfm))) {
-			params.kctx_len /= 2;
-			params.opad_needed = 0;
-		}
-		params.last = 0;
-		params.more = 1;
-		params.sg_len = rounddown(params.sg_len, bs);
-		params.hash_size = params.alg_prm.result_size;
-		params.scmd1 = 0;
-	} else {
-		params.last = 1;
-		params.more = 0;
-		params.hash_size = crypto_ahash_digestsize(rtfm);
-		params.scmd1 = reqctx->data_len + params.sg_len;
-	}
-	params.bfr_len = 0;
-	reqctx->data_len += params.sg_len;
-	skb = create_hash_wr(req, &params);
-	if (IS_ERR(skb)) {
-		error = PTR_ERR(skb);
-		goto err;
-	}
-	hctx_wr->processed += params.sg_len;
-	skb->dev = u_ctx->lldi.ports[0];
-	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
-	chcr_send_wr(skb);
-	return 0;
-err:
-	return error;
-}
-
 static int chcr_ahash_finup(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -2008,6 +1862,151 @@ unmap:
 	return error;
 }
 
+static int chcr_ahash_continue(struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = NULL;
+	struct sk_buff *skb;
+	struct hash_wr_param params;
+	u8 bs;
+	int error;
+
+	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	u_ctx = ULD_CTX(h_ctx(rtfm));
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    h_ctx(rtfm)->tx_qidx))) {
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -EBUSY;
+	}
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
+					    HASH_SPACE_LEFT(params.kctx_len),
+					    hctx_wr->src_ofst);
+	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
+		params.sg_len = req->nbytes - hctx_wr->processed;
+	if (!hctx_wr->result ||
+	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.sg_len = rounddown(params.sg_len, bs);
+		params.hash_size = params.alg_prm.result_size;
+		params.scmd1 = 0;
+	} else {
+		params.last = 1;
+		params.more = 0;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.scmd1 = reqctx->data_len + params.sg_len;
+	}
+	params.bfr_len = 0;
+	reqctx->data_len += params.sg_len;
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto err;
+	}
+	hctx_wr->processed += params.sg_len;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return 0;
+err:
+	return error;
+}
+
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+					  unsigned char *input,
+					  int err)
+{
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+	int digestsize, updated_digestsize;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+
+	if (input == NULL)
+		goto out;
+	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+	updated_digestsize = digestsize;
+	if (digestsize == SHA224_DIGEST_SIZE)
+		updated_digestsize = SHA256_DIGEST_SIZE;
+	else if (digestsize == SHA384_DIGEST_SIZE)
+		updated_digestsize = SHA512_DIGEST_SIZE;
+
+	if (hctx_wr->dma_addr) {
+		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
+				 hctx_wr->dma_len, DMA_TO_DEVICE);
+		hctx_wr->dma_addr = 0;
+	}
+	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
+	    req->nbytes)) {
+		if (hctx_wr->result == 1) {
+			hctx_wr->result = 0;
+			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+			       digestsize);
+		} else {
+			memcpy(reqctx->partial_hash,
+			       input + sizeof(struct cpl_fw6_pld),
+			       updated_digestsize);
+
+		}
+		goto unmap;
+	}
+	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+	       updated_digestsize);
+
+	err = chcr_ahash_continue(req);
+	if (err)
+		goto unmap;
+	return;
+unmap:
+	if (hctx_wr->is_sg_map)
+		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+
+
+out:
+	req->base.complete(&req->base, err);
+}
+
+/*
+ *	chcr_handle_resp - Unmap the DMA buffers associated with the request
+ *	@req: crypto request
+ */
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+		     int err)
+{
+	struct crypto_tfm *tfm = req->tfm;
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct adapter *adap = padap(ctx->dev);
+
+	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AEAD:
+		chcr_handle_aead_resp(aead_request_cast(req), input, err);
+		break;
+
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+					      input, err);
+		break;
+
+	case CRYPTO_ALG_TYPE_AHASH:
+		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
+	}
+	atomic_inc(&adap->chcr_stats.complete);
+	return err;
+}
 static int chcr_ahash_export(struct ahash_request *areq, void *out)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 71025ea9c3db..c8e8972af283 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -340,5 +340,4 @@ void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
 			   struct hash_wr_param *param);
 int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
 void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
-static int chcr_ahash_continue(struct ahash_request *req);
 #endif /* __CHCR_CRYPTO_H__ */
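The shape of the fix, reduced to a sketch with simplified, assumed signatures (the real functions take struct ahash_request and friends, as shown in the hunks above): the static callee is defined before its caller within the same .c file, so the translation unit needs no forward declaration and the header carries no static prototypes.

```c
/* Sketch of the post-patch ordering in chcr_algo.c; names suffixed
 * "_sketch" and bodies are placeholders, not the driver's real code.
 */

/* callee defined first */
static int chcr_ahash_continue_sketch(void)
{
	/* build and send the work request for the next data chunk */
	return 0;
}

/* caller defined afterwards -- no forward declaration required */
static void chcr_handle_ahash_resp_sketch(int err)
{
	if (!err)
		err = chcr_ahash_continue_sketch();
	/* on error or on the final chunk, complete the request */
	(void)err;
}
```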