/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;

struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE];
} __aligned(64);

struct qat_alg_session_ctx {
	struct qat_alg_cd *enc_cd;
	dma_addr_t enc_cd_paddr;
	struct qat_alg_cd *dec_cd;
	dma_addr_t dec_cd_paddr;
	struct qat_auth_state *auth_hw_state_enc;
	dma_addr_t auth_state_enc_paddr;
	struct qat_auth_state *auth_hw_state_dec;
	dma_addr_t auth_state_dec_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_session_ctx struct */
};

static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen, uint8_t *auth_state)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ctx->hash_tfm)];
	} desc;
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	desc.shash.tfm = ctx->hash_tfm;
	desc.shash.flags = 0x0;

	if (auth_keylen > block_size) {
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(&desc.shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memset(ipad + digest_size, 0, block_size - digest_size);
		memset(opad + digest_size, 0, block_size - digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memset(ipad + auth_keylen, 0, block_size - auth_keylen);
		memset(opad + auth_keylen, 0, block_size - auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(&desc.shash))
		return -EFAULT;

	if (crypto_shash_update(&desc.shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(&desc.shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(&desc.shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(&desc.shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(&desc.shash))
		return -EFAULT;

	if (crypto_shash_update(&desc.shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(&desc.shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(&desc.shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(&desc.shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	return 0;
}

static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
				   (uint8_t *)ctx->auth_hw_state_enc))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
			sizeof(struct icp_qat_hw_auth_counter) +
			round_up(hash_cd_ctrl->inner_state1_sz, 8);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
				   (uint8_t *)ctx->auth_hw_state_dec))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	/* the decrypt precompute lives in auth_hw_state_dec */
	auth_param->u1.auth_partial_st_prefix = ctx->auth_state_dec_paddr +
			sizeof(struct icp_qat_hw_auth_counter) +
			round_up(hash_cd_ctrl->inner_state1_sz, 8);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
				 const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	switch (keys.enckeylen) {
	case AES_KEYSIZE_128:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		goto bad_key;
	}

	if (qat_alg_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
			  unsigned int keylen)
{
	struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		memset(ctx->auth_hw_state_enc, 0,
		       sizeof(struct qat_auth_state));
		memset(ctx->auth_hw_state_dec, 0,
		       sizeof(struct qat_auth_state));
		memset(&ctx->enc_fw_req_tmpl, 0,
		       sizeof(struct icp_qat_fw_la_bulk_req));
		memset(&ctx->dec_fw_req_tmpl, 0,
		       sizeof(struct icp_qat_fw_la_bulk_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
		ctx->auth_hw_state_enc =
			dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
					    &ctx->auth_state_enc_paddr,
					    GFP_ATOMIC);
		if (!ctx->auth_hw_state_enc) {
			spin_unlock(&ctx->lock);
			goto out_free_dec;
		}
		ctx->auth_hw_state_dec =
			dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
					    &ctx->auth_state_dec_paddr,
					    GFP_ATOMIC);
		if (!ctx->auth_hw_state_dec) {
			spin_unlock(&ctx->lock);
			goto out_free_auth_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	dma_free_coherent(dev, sizeof(struct qat_auth_state),
			  ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
	ctx->auth_hw_state_dec = NULL;
out_free_auth_enc:
	dma_free_coherent(dev, sizeof(struct qat_auth_state),
			  ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
	ctx->auth_hw_state_enc = NULL;
out_free_dec:
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	int i, bufs = bl->num_bufs;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = bufs - blout->num_mapped_bufs;

		for (i = bufless; i < bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
		kfree(blout);
	}
}

static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
						 DMA_BIDIRECTIONAL);
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
		goto err;
	bufs++;

	for_each_sg(sgl, sg, n, i) {
		int y = i + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
	}
	bufl->num_bufs = n + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		buflout = kmalloc_node(sz, GFP_ATOMIC,
				       inst->accel_dev->numa_node);
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			buflout->bufers[y].len = sg->length;
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
		}
		buflout->num_bufs = n + bufs;
		buflout->num_mapped_bufs = n;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	for_each_sg(sgl, sg, n + bufs, i) {
		if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
	}
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			if (!dma_mapping_error(dev, buflout->bufers[y].addr))
				dma_unmap_single(dev, buflout->bufers[y].addr,
						 buflout->bufers[y].len,
						 DMA_BIDIRECTIONAL);
		}
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;
	struct qat_alg_session_ctx *ctx = qat_req->ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->areq;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static int qat_alg_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
				int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_enc(struct aead_request *areq)
{
	return qat_alg_enc_internal(areq, areq->iv, 0);
}

static int qat_alg_genivenc(struct aead_givcrypt_request *req)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	__be64 seq;

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_enc_internal(&req->areq, req->giv, 1);
}

static int qat_alg_init(struct crypto_tfm *tfm,
			enum icp_qat_hw_auth_algo hash, const char *hash_name)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return -EFAULT;
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static int qat_alg_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd)
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	if (ctx->dec_cd)
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	if (ctx->auth_hw_state_enc)
		dma_free_coherent(dev, sizeof(struct qat_auth_state),
				  ctx->auth_hw_state_enc,
				  ctx->auth_state_enc_paddr);

	if (ctx->auth_hw_state_dec)
		dma_free_coherent(dev, sizeof(struct qat_auth_state),
				  ctx->auth_hw_state_dec,
				  ctx->auth_state_dec_paddr);

	qat_crypto_put_instance(inst);
}

static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha1_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha256_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha512_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
} };

int qat_algs_register(void)
{
	if (atomic_add_return(1, &active_dev) == 1) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
						CRYPTO_ALG_ASYNC;
		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	}
	return 0;
}

int qat_algs_unregister(void)
{
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	return 0;
}

int qat_algs_init(void)
{
	atomic_set(&active_dev, 0);
	return crypto_get_default_rng();
}

void qat_algs_exit(void)
{
	crypto_put_default_rng();
}