/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"

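/*
 * Block-sized XOR helpers used by the CBC code below.  The u32 casts
 * assume the caller passes buffers that satisfy the algorithm's
 * alignment mask, which crypt() and the cit_iv placement guarantee.
 */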
static inline void xor_64(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
	((u32 *)a)[2] ^= ((u32 *)b)[2];
	((u32 *)a)[3] ^= ((u32 *)b)[3];
}

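/*
 * Slow path: the current block straddles a page boundary or is
 * misaligned.  Gather it into an aligned stack buffer, process it
 * there, then scatter the result back out.
 */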
static unsigned int crypt_slow(const struct cipher_desc *desc,
			       struct scatter_walk *in,
			       struct scatter_walk *out, unsigned int bsize)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
	u8 buffer[bsize * 2 + alignmask];
	u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	u8 *dst = src + bsize;
	unsigned int n;

	n = scatterwalk_copychunks(src, in, bsize, 0);
	scatterwalk_advance(in, n);

	desc->prfn(desc, dst, src, bsize);

	n = scatterwalk_copychunks(dst, out, bsize, 1);
	scatterwalk_advance(out, n);

	return bsize;
}

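/*
 * Fast path: the current scatterlist segments hold at least one whole
 * block, so the processing function can run on them directly.  If the
 * walk positions are misaligned, tmp points to a page-sized bounce
 * buffer that the data is copied through instead.
 */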
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
				      struct scatter_walk *in,
				      struct scatter_walk *out,
				      unsigned int nbytes, u8 *tmp)
{
	u8 *src, *dst;

	src = in->data;
	dst = scatterwalk_samebuf(in, out) ? src : out->data;

	if (tmp) {
		memcpy(tmp, in->data, nbytes);
		src = tmp;
		dst = tmp;
	}

	nbytes = desc->prfn(desc, dst, src, nbytes);

	if (tmp)
		memcpy(out->data, tmp, nbytes);

	scatterwalk_advance(in, nbytes);
	scatterwalk_advance(out, nbytes);

	return nbytes;
}

/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes)
{
	struct scatter_walk walk_in, walk_out;
	struct crypto_tfm *tfm = desc->tfm;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned long buffer = 0;

	if (!nbytes)
		return 0;

	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

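	/*
	 * Walk both lists one segment at a time.  Misaligned segments go
	 * through a bounce page; if that page cannot be allocated, n is
	 * forced to 0 so the block falls through to crypt_slow(), which
	 * only needs its own stack buffer.
	 */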
	for (;;) {
		unsigned int n = nbytes;
		u8 *tmp = NULL;

		if (!scatterwalk_aligned(&walk_in, alignmask) ||
		    !scatterwalk_aligned(&walk_out, alignmask)) {
			if (!buffer) {
				buffer = __get_free_page(GFP_ATOMIC);
				if (!buffer)
					n = 0;
			}
			tmp = (u8 *)buffer;
		}

		scatterwalk_map(&walk_in, 0);
		scatterwalk_map(&walk_out, 1);

		n = scatterwalk_clamp(&walk_in, n);
		n = scatterwalk_clamp(&walk_out, n);

		if (likely(n >= bsize))
			n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
		else
			n = crypt_slow(desc, &walk_in, &walk_out, bsize);

		nbytes -= n;

		scatterwalk_done(&walk_in, 0, nbytes);
		scatterwalk_done(&walk_out, 1, nbytes);

		if (!nbytes)
			break;

		crypto_yield(tfm);
	}

	if (buffer)
		free_page(buffer);

	return 0;
}

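/*
 * If the caller's IV does not satisfy the algorithm's alignment mask,
 * run the operation against an aligned copy on the stack and copy the
 * updated IV back once it completes.
 */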
static int crypt_iv_unaligned(struct cipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	u8 *iv = desc->info;

	if (unlikely(((unsigned long)iv & alignmask))) {
		unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
		u8 buffer[ivsize + alignmask];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
		int err;

		desc->info = memcpy(tmp, iv, ivsize);
		err = crypt(desc, dst, src, nbytes);
		memcpy(iv, tmp, ivsize);

		return err;
	}

	return crypt(desc, dst, src, nbytes);
}

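/*
 * Software CBC encryption: XOR each plaintext block into the running
 * IV, encrypt the result, and carry the ciphertext forward as the IV
 * for the next block.  Used when the cipher provides no cia_encrypt_cbc
 * of its own.
 */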
static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);

	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		xor(iv, src);
		fn(crypto_tfm_ctx(tfm), dst, iv);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

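/*
 * Software CBC decryption.  For in-place operation (src == dst) each
 * block is decrypted into a stack buffer first, because the ciphertext
 * block must survive long enough to become the IV for the next block.
 */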
static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);

	u8 stack[src == dst ? bsize : 0];
	u8 *buf = stack;
	u8 **dst_p = src == dst ? &buf : &dst;

	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		u8 *tmp_dst = *dst_p;

		fn(crypto_tfm_ctx(tfm), tmp_dst, src);
		xor(tmp_dst, iv);
		memcpy(iv, src, bsize);
		if (tmp_dst != dst)
			memcpy(dst, tmp_dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

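/*
 * Software ECB: apply the block function to each block independently.
 * Used when the cipher provides no ECB implementation of its own.
 */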
static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
				const u8 *src, unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	int bsize = crypto_tfm_alg_blocksize(tfm);
	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		fn(crypto_tfm_ctx(tfm), dst, src);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

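/*
 * Validate the key length against the algorithm's declared range
 * before handing the key to the cipher's own setkey routine.
 */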
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
			       &tfm->crt_flags);
}

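/*
 * Per-mode entry points.  Each builds a cipher_desc that pairs the
 * cipher's single-block function with either the algorithm's own
 * multi-block routine or the generic software fallback above.
 */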
static int ecb_encrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;

	return crypt(&desc, dst, src, nbytes);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
	desc.info = tfm->crt_cipher.cit_iv;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
			  struct scatterlist *dst,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *iv)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
	desc.info = iv;

	return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
	desc.info = tfm->crt_cipher.cit_iv;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
			  struct scatterlist *dst,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *iv)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
	desc.info = iv;

	return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

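/*
 * Stubs for modes (CFB, CTR) that are declared but not implemented by
 * this layer.
 */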
static int nocrypt(struct crypto_tfm *tfm,
		   struct scatterlist *dst,
		   struct scatterlist *src,
		   unsigned int nbytes)
{
	return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int nbytes, u8 *iv)
{
	return -ENOSYS;
}

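/*
 * For orientation, a minimal sketch of how a consumer would drive the
 * entry points wired up below via the 2.6-era cipher interface.  The
 * key, iv, sg_src, sg_dst and nbytes names are illustrative
 * placeholders, not taken from this file:
 *
 *	struct crypto_tfm *tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);
 *
 *	if (tfm) {
 *		crypto_cipher_setkey(tfm, key, 16);
 *		crypto_cipher_set_iv(tfm, iv, crypto_tfm_alg_ivsize(tfm));
 *		crypto_cipher_encrypt(tfm, &sg_dst, &sg_src, nbytes);
 *		crypto_free_tfm(tfm);
 *	}
 */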
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
	u32 mode = flags & CRYPTO_TFM_MODE_MASK;
	tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
	return 0;
}

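/*
 * Wire up the mode-specific entry points for a new transform and, for
 * CBC, pick a block-sized XOR helper and locate space for the IV.
 */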
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;

	ops->cit_setkey = setkey;

	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		break;

	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}

	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
		unsigned long align;
		unsigned long addr;

		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

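		/*
		 * The IV lives directly after the cipher context: take
		 * the context address, round it up to the alignment
		 * mask, then step over the aligned context size.
		 */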
		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		align = crypto_tfm_alg_alignmask(tfm) + 1;
		addr = (unsigned long)crypto_tfm_ctx(tfm);
		addr = ALIGN(addr, align);
		addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
		ops->cit_iv = (void *)addr;
	}

out:
	return ret;
}

void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
}