drivers/crypto/ccp/ccp-crypto-sha.c
/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

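/*
 * Completion callback invoked once a queued CCP SHA command finishes:
 * it stashes any unhashed tail of the source data back into the request
 * buffer and, when a result area was supplied, copies out the digest.
 */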
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->ctx, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

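/*
 * Core update path shared by update/final/finup: data is fed to the CCP
 * in block-size multiples.  Anything short of a block is buffered in
 * rctx->buf, and for non-final updates that land exactly on a block
 * boundary one block is deliberately held back, since the CCP cannot
 * perform a zero-length final operation.
 */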
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

	sg = NULL;
	if (rctx->buf_count && nbytes) {
		/* Build the data scatterlist table - allocate enough entries
		 * for both data pieces (buffer and input data)
		 */
		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
		sg_count = sg_nents(req->src) + 1;
		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
		if (ret)
			return ret;

		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
		sg_mark_end(sg);

		sg = rctx->data_sg.sgl;
	} else if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

		sg = &rctx->buf_sg;
	} else if (nbytes) {
		sg = req->src;
	}

	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
	rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
	rctx->cmd.u.sha.src = sg;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
		&ctx->u.sha.opad_sg : NULL;
	rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
		ctx->u.sha.opad_count : 0;
	rctx->cmd.u.sha.first = rctx->first;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	rctx->first = 0;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	memset(rctx, 0, sizeof(*rctx));

	rctx->type = alg->type;
	rctx->first = 1;

	if (ctx->u.sha.key_len) {
		/* Buffer the HMAC key for first update */
		memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
		rctx->buf_count = block_size;
	}

	return 0;
}

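/*
 * ahash entry points: update, final and finup are thin wrappers around
 * ccp_do_sha_update(); digest is simply init followed by finup.
 */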
static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_sha_init(req);
	if (ret)
		return ret;

	return ccp_sha_finup(req);
}

static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct crypto_shash *shash = ctx->u.sha.hmac_tfm;

	SHASH_DESC_ON_STACK(sdesc, shash);

	unsigned int block_size = crypto_shash_blocksize(shash);
	unsigned int digest_size = crypto_shash_digestsize(shash);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		sdesc->tfm = shash;
		sdesc->flags = crypto_ahash_get_flags(tfm) &
			CRYPTO_TFM_REQ_MAY_SLEEP;

		ret = crypto_shash_digest(sdesc, key, key_len,
					  ctx->u.sha.key);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		key_len = digest_size;
	} else {
		memcpy(ctx->u.sha.key, key, key_len);
	}

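	/*
	 * Precompute the standard HMAC pad blocks (RFC 2104):
	 * ipad = key ^ 0x36..., opad = key ^ 0x5c...  The ipad block is
	 * prepended to the message data by ccp_sha_init(); the opad block
	 * is handed to the CCP through the opad scatterlist below.
	 */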
	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
	}

	sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
	ctx->u.sha.opad_count = block_size;

	ctx->u.sha.key_len = key_len;

	return 0;
}

static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	ctx->complete = ccp_sha_complete;
	ctx->u.sha.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

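/*
 * The HMAC variants allocate a synchronous shash of the underlying SHA
 * algorithm (named by child_alg); it is only used by ccp_sha_setkey()
 * to pre-hash keys that are longer than the block size.
 */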
static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
	struct crypto_shash *hmac_tfm;

	hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s needed for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.sha.hmac_tfm)
		crypto_free_shash(ctx->u.sha.hmac_tfm);

	ccp_sha_cra_exit(tfm);
}

struct ccp_sha_def {
	const char *name;
	const char *drv_name;
	enum ccp_sha_type type;
	u32 digest_size;
	u32 block_size;
};

static struct ccp_sha_def sha_algs[] = {
	{
		.name		= "sha1",
		.drv_name	= "sha1-ccp",
		.type		= CCP_SHA_TYPE_1,
		.digest_size	= SHA1_DIGEST_SIZE,
		.block_size	= SHA1_BLOCK_SIZE,
	},
	{
		.name		= "sha224",
		.drv_name	= "sha224-ccp",
		.type		= CCP_SHA_TYPE_224,
		.digest_size	= SHA224_DIGEST_SIZE,
		.block_size	= SHA224_BLOCK_SIZE,
	},
	{
		.name		= "sha256",
		.drv_name	= "sha256-ccp",
		.type		= CCP_SHA_TYPE_256,
		.digest_size	= SHA256_DIGEST_SIZE,
		.block_size	= SHA256_BLOCK_SIZE,
	},
};

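/*
 * Register the "hmac(<sha>)" wrapper for an already-registered SHA
 * algorithm: the base ahash is copied, then only the names, the setkey
 * handler and the cra_init/cra_exit hooks are overridden.
 */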
static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return ret;
}

static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->type = def->type;

	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}

int ccp_register_sha_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}