/* drivers/crypto/qat/qat_common/qat_algs.c */
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};

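/*
 * Map a QAT hash algorithm onto the size of its per-block intermediate
 * state, as laid out in the hardware content descriptor.
 */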
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
	return -EFAULT;
}

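/*
 * Precompute the HMAC inner and outer partial hashes: hash one block of
 * the key XORed with the ipad/opad constants, then export the resulting
 * intermediate state into the content descriptor in big-endian form, so
 * the accelerator can resume the HMAC computation from that state.
 */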
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}

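/*
 * Initialise the request header fields shared by every lookaside (LA)
 * request this driver builds: 64-bit content descriptor address, SGL
 * data pointers, and no partial processing or state update.
 */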
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

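/*
 * Build the encrypt-direction session: a content descriptor with the
 * cipher block followed by the auth block, and a firmware request
 * template using the CIPHER_HASH command (encrypt, then authenticate).
 */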
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

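/*
 * Build the decrypt-direction session: the auth block comes first in the
 * content descriptor and the HASH_CIPHER command is used, so the digest
 * is verified by the hardware (CMP_AUTH_RES) before decryption.
 */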
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

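/*
 * Fill in the parts of an ablkcipher content descriptor and request
 * template that are common to the encrypt and decrypt directions.
 */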
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}

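/*
 * Validate the AES key length for the given mode and pick the matching
 * hardware algorithm. XTS keys are twice the nominal AES key size since
 * they carry a second, tweak, key.
 */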
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

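/*
 * Split an authenc() key blob into its cipher and auth keys and program
 * both the encrypt and the decrypt session templates.
 */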
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

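/* Program both ablkcipher session templates for the given key and mode. */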
static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen,
					    int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

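/*
 * setkey for the AEAD algorithms. On first use this binds the tfm to a
 * crypto instance and allocates the DMA coherent content descriptors;
 * on rekeying the existing descriptors are cleared and rebuilt.
 */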
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst)
			return -EINVAL;

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd)
			return -ENOMEM;
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd)
			goto out_free_enc;
	}
	if (qat_alg_aead_init_sessions(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

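/*
 * Unmap and free the firmware buffer lists built by qat_alg_sgl_to_bufl(),
 * including the separate destination list used by out-of-place requests.
 */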
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

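/*
 * Translate the source/destination scatterlists into the flat buffer
 * lists the firmware consumes. Each scatterlist entry is DMA mapped
 * individually and the list itself is mapped TO_DEVICE; on failure
 * everything mapped so far is unwound.
 */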
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	sg_nctr = 0;
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = 0; i < n; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}

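/*
 * Completion callbacks, invoked from the response ring: release the DMA
 * mappings and complete the original crypto API request.
 */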
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}

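/*
 * AEAD decrypt: cipher_length excludes the digest, which the hardware
 * verifies itself (the decrypt template set CMP_AUTH_RES); submission is
 * retried a few times if the ring is full.
 */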
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

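/*
 * AEAD encrypt: the whole cryptlen is ciphered, the associated data is
 * covered by the auth region, and the digest is produced by the
 * hardware (RET_AUTH_RES in the encrypt template).
 */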
static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

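/*
 * setkey for the ablkcipher algorithms: the same allocate-or-clear logic
 * as the AEAD setkey, but with the context protected by a spinlock and
 * the cipher mode supplied by the per-mode wrappers below.
 */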
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_XTS_MODE);
}

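/*
 * Submit an ablkcipher request: copy the prebuilt request template,
 * attach the mapped buffer lists and the IV, and post the message to
 * the symmetric TX ring, retrying briefly if the ring is full.
 */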
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

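/*
 * tfm init/exit: allocate the software shash used for the HMAC
 * precomputes, and clear and free the DMA content descriptors when the
 * tfm goes away.
 */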
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

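/*
 * Algorithm descriptors registered with the crypto API; the high
 * priority (4001) makes them preferred over software implementations.
 */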
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_cbc_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "qat_aes_ctr",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_ctr_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "qat_aes_xts",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_xts_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };

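/*
 * Registration is reference counted across accelerator devices: the
 * algorithms are registered when the first device comes up and removed
 * only when the last one goes away.
 */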
int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
}