/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name = "xts(aes)",
		.drv_name = "xts-aes-ccp",
	},
};

struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

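/* Unit sizes supported directly by the CCP hardware.  Entries whose value
 * is CCP_XTS_AES_UNIT_SIZE__LAST mark sizes the CCP cannot process;
 * requests that resolve to one of these are handed to the software
 * fallback cipher instead.
 */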
static struct ccp_unit_size_map unit_size_map[] = {
	{
		.size = 4096,
		.value = CCP_XTS_AES_UNIT_SIZE_4096,
	},
	{
		.size = 2048,
		.value = CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size = 1024,
		.value = CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size = 512,
		.value = CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size = 256,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 128,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 64,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 32,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 16,
		.value = CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size = 1,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
};

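/* Request-completion callback: on success, copy the IV saved in the
 * request context back into the request.
 */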
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

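/* The full key (AES key plus tweak key) is always passed on to the
 * fallback cipher; the local copy used by the CCP is only made for a
 * 256-bit combined key (two 128-bit halves).
 */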
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

	/* Only support 128-bit AES key with a 128-bit Tweak key,
	 * otherwise use the fallback
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
					key_len);
}

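/* Common encrypt/decrypt path: validate the request, find a matching
 * unit size, then either defer to the fallback cipher or build a CCP
 * command and queue it to the hardware.
 */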
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int unit;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (req->nbytes & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (!req->info)
		return -EINVAL;

	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
			break;

	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
		ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
				  crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

		return ret;
	}

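	/* The CCP can handle this request directly: save the IV in the
	 * request context and describe the XTS operation in a ccp_cmd.
	 */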
	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}

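/* Transform init: allocate the asynchronous software fallback cipher that
 * handles key and unit sizes the CCP does not support.
 */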
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

	fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0,
					       CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver %s\n",
			crypto_tfm_alg_name(tfm));
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_ablkcipher = fallback_tfm;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
				      fallback_tfm->base.crt_ablkcipher.reqsize;

	return 0;
}

static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_ablkcipher)
		crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
	ctx->u.aes.tfm_ablkcipher = NULL;
}

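/* Build and register a crypto_alg for one XTS(AES) definition, adding it
 * to the driver's list of registered algorithms on success.
 */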
static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

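/* Register every XTS(AES) variant described in aes_xts_algs. */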
int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}