/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km 0
#define kr (16*4)
#define rr ((16*4)+16)

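/*
 * Assumed layout of the crypto context behind these offsets (matching
 * the kernel's generic cast5 implementation): km holds sixteen 32-bit
 * masking subkeys, kr holds sixteen 5-bit rotation subkeys (one byte
 * each), and rr is a flag set for short keys (<= 80 bits), which use
 * 12 rounds instead of 16 per RFC 2144.
 */
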
/* s-boxes */
#define s1 cast_s1
#define s2 cast_s2
#define s3 cast_s3
#define s4 cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM %xmm9
#define RKR %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32 %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1 %rbp
#define RID1d %ebp
#define RID2 %rsi
#define RID2d %esi

#define RGI1 %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2 %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3 %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4 %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1 %r8
#define RFS1d %r8d
#define RFS2 %r9
#define RFS2d %r9d
#define RFS3 %r10
#define RFS3d %r10d


#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl src ## bh, RID1d; \
        movzbl src ## bl, RID2d; \
        shrq $16, src; \
        movl s1(, RID1, 4), dst ## d; \
        op1 s2(, RID2, 4), dst ## d; \
        movzbl src ## bh, RID1d; \
        movzbl src ## bl, RID2d; \
        interleave_op(il_reg); \
        op2 s3(, RID1, 4), dst ## d; \
        op3 s4(, RID2, 4), dst ## d;

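/*
 * lookup_32bit performs the scalar s-box phase of one CAST5 F-function:
 * the four bytes of the low 32 bits of 'src' select entries from the
 * 32-bit s-boxes s1..s4, combined with op1..op3 (xor/add/sub depending
 * on the round type). interleave_op (e.g. shr_next) shifts the 64-bit
 * source so the upper 32 bits are in position for the following call.
 */
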
#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16, reg;

#define F_head(a, x, gi1, gi2, op0) \
        op0 a, RKM, x; \
        vpslld RKRF, x, RTMP; \
        vpsrld RKRR, x, x; \
        vpor RTMP, x, x; \
        \
        vmovq x, gi1; \
        vpextrq $1, x, gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
        lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
        lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
        \
        lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
        shlq $32, RFS2; \
        orq RFS1, RFS2; \
        lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
        shlq $32, RFS1; \
        orq RFS1, RFS3; \
        \
        vmovq RFS2, x; \
        vpinsrq $1, RFS3, x, x;

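/*
 * F_head does the vector part of the round: combine the masking key
 * (op0 = add/xor/sub), rotate left by the per-round amount via a
 * shift-left/shift-right/or pair, then spill the four 32-bit lanes to
 * general-purpose registers. F_tail runs the four scalar s-box lookups
 * and gathers the results back into an xmm register.
 */
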
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
        F_head(b1, RX, RGI1, RGI2, op0); \
        F_head(b2, RX, RGI3, RGI4, op0); \
        \
        F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
        F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
        \
        vpxor a1, RX, a1; \
        vpxor a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

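/*
 * The three CAST5 round function types (RFC 2144, section 2.2):
 *   Type 1: I = ((Km + D) <<< Kr), f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 *   Type 2: I = ((Km ^ D) <<< Kr), f = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]
 *   Type 3: I = ((Km - D) <<< Kr), f = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]
 */
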
#define subround(a1, b1, a2, b2, f) \
        F ## f ## _2(a1, b1, a2, b2);

#define round(l, r, n, f) \
        vbroadcastss (km+(4*n))(CTX), RKM; \
        vpand R1ST, RKR, RKRF; \
        vpsubq RKRF, R32, RKRR; \
        vpsrldq $1, RKR, RKR; \
        subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
        subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

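/*
 * Per round: broadcast the 32-bit masking key for round n, mask the
 * first byte of RKR down to 5 bits (R1ST = 0x1f) to get the rotation
 * amount, derive 32 - r for the complementary right shift, then shift
 * RKR right by one byte so the next round's amount moves into place.
 */
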
#define enc_preload_rkr() \
        vbroadcastss .L16_mask, RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
        vbroadcastss .L16_mask, RKR; \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor kr(CTX), RKR, RKR; \
        vpshufb .Lbswap128_mask, RKR, RKR;

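/*
 * Since rotation amounts are taken mod 32, adding 16 to a 5-bit amount
 * is the same as flipping bit 4, so a single vpxor with bytes of 0x10
 * implements the "+16". The effect is that each rotated value ends up
 * halfword-swapped, which lets lookup_32bit index the s-boxes straight
 * from the bh/bl byte registers. Decryption consumes the rotation
 * bytes in reverse round order, hence the byte reverse in
 * dec_preload_rkr.
 */
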
#define transpose_2x4(x0, x1, t0, t1) \
        vpunpckldq x1, x0, t0; \
        vpunpckhdq x1, x0, t1; \
        \
        vpunpcklqdq t1, t0, x0; \
        vpunpckhqdq t1, t0, x1;

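/*
 * 2x4 dword transpose: with x0 = [L1 R1 L2 R2] and x1 = [L3 R3 L4 R4]
 * on input, x0 ends up holding the four left halves and x1 the four
 * right halves (in permuted order, undone by the inverse transpose),
 * so each Feistel half of four blocks is processed in one register.
 */
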
#define inpack_blocks(x0, x1, t0, t1, rmask) \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1; \
        \
        transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
        transpose_2x4(x0, x1, t0, t1) \
        \
        vpshufb rmask, x0, x0; \
        vpshufb rmask, x1, x1;

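/*
 * inpack_blocks byteswaps each 32-bit word (CAST5 is defined on
 * big-endian words) and splits four 64-bit blocks into an L-half and
 * an R-half vector; outunpack_blocks is the inverse.
 */
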
.data

.align 16
.Lbswap_mask:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lbswap_iv_mask:
        .byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
        .byte 16, 16, 16, 16
.L32_mask:
        .byte 32, 0, 0, 0
.Lfirst_mask:
        .byte 0x1f, 0, 0, 0
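
/*
 * .L16_mask feeds the +16 rotation adjustment, .L32_mask provides the
 * constant 32 for computing the complementary right-shift amount, and
 * .Lfirst_mask (0x1f) extracts the 5-bit rotation from the first key
 * byte in RKR.
 */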

.text

.align 16
__cast5_enc_blk16:
        /* input:
         *      %rdi: ctx, CTX
         *      RL1: blocks 1 and 2
         *      RR1: blocks 3 and 4
         *      RL2: blocks 5 and 6
         *      RR2: blocks 7 and 8
         *      RL3: blocks 9 and 10
         *      RR3: blocks 11 and 12
         *      RL4: blocks 13 and 14
         *      RR4: blocks 15 and 16
         * output:
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
         *      RR2: encrypted blocks 7 and 8
         *      RL3: encrypted blocks 9 and 10
         *      RR3: encrypted blocks 11 and 12
         *      RL4: encrypted blocks 13 and 14
         *      RR4: encrypted blocks 15 and 16
         */

        pushq %rbp;
        pushq %rbx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
        enc_preload_rkr();

        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(RL2, RR2, RTMP, RX, RKM);
        inpack_blocks(RL3, RR3, RTMP, RX, RKM);
        inpack_blocks(RL4, RR4, RTMP, RX, RKM);

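        /*
         * The last argument selects the round function type; CAST5
         * cycles through types 1, 2, 3 (round n uses type (n mod 3) + 1).
         * Swapping the RL/RR argument order on alternate rounds
         * implements the Feistel swap without register moves.
         */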
        round(RL, RR, 0, 1);
        round(RR, RL, 1, 2);
        round(RL, RR, 2, 3);
        round(RR, RL, 3, 1);
        round(RL, RR, 4, 2);
        round(RR, RL, 5, 3);
        round(RL, RR, 6, 1);
        round(RR, RL, 7, 2);
        round(RL, RR, 8, 3);
        round(RR, RL, 9, 1);
        round(RL, RR, 10, 2);
        round(RR, RL, 11, 3);

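        /*
         * ctx->rr is nonzero for keys of 80 bits or less, which
         * RFC 2144 specifies as 12 rounds; skip rounds 13-16 then.
         */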
        movzbl rr(CTX), %eax;
        testl %eax, %eax;
        jnz .L__skip_enc;

        round(RL, RR, 12, 1);
        round(RR, RL, 13, 2);
        round(RL, RR, 14, 3);
        round(RR, RL, 15, 1);

.L__skip_enc:
        popq %rbx;
        popq %rbp;

        vmovdqa .Lbswap_mask, RKM;

        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

        ret;
ENDPROC(__cast5_enc_blk16)

.align 16
__cast5_dec_blk16:
        /* input:
         *      %rdi: ctx, CTX
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
         *      RR2: encrypted blocks 7 and 8
         *      RL3: encrypted blocks 9 and 10
         *      RR3: encrypted blocks 11 and 12
         *      RL4: encrypted blocks 13 and 14
         *      RR4: encrypted blocks 15 and 16
         * output:
         *      RL1: decrypted blocks 1 and 2
         *      RR1: decrypted blocks 3 and 4
         *      RL2: decrypted blocks 5 and 6
         *      RR2: decrypted blocks 7 and 8
         *      RL3: decrypted blocks 9 and 10
         *      RR3: decrypted blocks 11 and 12
         *      RL4: decrypted blocks 13 and 14
         *      RR4: decrypted blocks 15 and 16
         */

        pushq %rbp;
        pushq %rbx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
        dec_preload_rkr();

        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(RL2, RR2, RTMP, RX, RKM);
        inpack_blocks(RL3, RR3, RTMP, RX, RKM);
        inpack_blocks(RL4, RR4, RTMP, RX, RKM);

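        /*
         * Decryption applies the rounds in reverse order;
         * dec_preload_rkr byte-reversed RKR above so the rotation
         * amounts are consumed in matching reverse order.
         */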
        movzbl rr(CTX), %eax;
        testl %eax, %eax;
        jnz .L__skip_dec;

        round(RL, RR, 15, 1);
        round(RR, RL, 14, 3);
        round(RL, RR, 13, 2);
        round(RR, RL, 12, 1);

.L__dec_tail:
        round(RL, RR, 11, 3);
        round(RR, RL, 10, 2);
        round(RL, RR, 9, 1);
        round(RR, RL, 8, 3);
        round(RL, RR, 7, 2);
        round(RR, RL, 6, 1);
        round(RL, RR, 5, 3);
        round(RR, RL, 4, 2);
        round(RL, RR, 3, 1);
        round(RR, RL, 2, 3);
        round(RL, RR, 1, 2);
        round(RR, RL, 0, 1);

        vmovdqa .Lbswap_mask, RKM;
        popq %rbx;
        popq %rbp;

        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

        ret;

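/*
 * Short-key (12-round) path: the first four rotation bytes in the
 * reversed RKR belong to the skipped rounds 16-13, so discard them
 * before rejoining the common tail.
 */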
.L__skip_dec:
        vpsrldq $4, RKR, RKR;
        jmp .L__dec_tail;
ENDPROC(__cast5_dec_blk16)

ENTRY(cast5_ecb_enc_16way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */
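        /*
         * Each 16-byte load carries two 64-bit CAST5 blocks; eight
         * loads make up the 16-block batch. %rsi doubles as RID2
         * inside the core and gets clobbered, so dst is stashed in
         * %r11 first.
         */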
        FRAME_BEGIN

        movq %rsi, %r11;

        vmovdqu (0*4*4)(%rdx), RL1;
        vmovdqu (1*4*4)(%rdx), RR1;
        vmovdqu (2*4*4)(%rdx), RL2;
        vmovdqu (3*4*4)(%rdx), RR2;
        vmovdqu (4*4*4)(%rdx), RL3;
        vmovdqu (5*4*4)(%rdx), RR3;
        vmovdqu (6*4*4)(%rdx), RL4;
        vmovdqu (7*4*4)(%rdx), RR4;

        call __cast5_enc_blk16;

        vmovdqu RR1, (0*4*4)(%r11);
        vmovdqu RL1, (1*4*4)(%r11);
        vmovdqu RR2, (2*4*4)(%r11);
        vmovdqu RL2, (3*4*4)(%r11);
        vmovdqu RR3, (4*4*4)(%r11);
        vmovdqu RL3, (5*4*4)(%r11);
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);

        FRAME_END
        ret;
ENDPROC(cast5_ecb_enc_16way)

ENTRY(cast5_ecb_dec_16way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */

        FRAME_BEGIN
        movq %rsi, %r11;

        vmovdqu (0*4*4)(%rdx), RL1;
        vmovdqu (1*4*4)(%rdx), RR1;
        vmovdqu (2*4*4)(%rdx), RL2;
        vmovdqu (3*4*4)(%rdx), RR2;
        vmovdqu (4*4*4)(%rdx), RL3;
        vmovdqu (5*4*4)(%rdx), RR3;
        vmovdqu (6*4*4)(%rdx), RL4;
        vmovdqu (7*4*4)(%rdx), RR4;

        call __cast5_dec_blk16;

        vmovdqu RR1, (0*4*4)(%r11);
        vmovdqu RL1, (1*4*4)(%r11);
        vmovdqu RR2, (2*4*4)(%r11);
        vmovdqu RL2, (3*4*4)(%r11);
        vmovdqu RR3, (4*4*4)(%r11);
        vmovdqu RL3, (5*4*4)(%r11);
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);

        FRAME_END
        ret;
ENDPROC(cast5_ecb_dec_16way)

ENTRY(cast5_cbc_dec_16way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN

        pushq %r12;

        movq %rsi, %r11;
        movq %rdx, %r12;

        vmovdqu (0*16)(%rdx), RL1;
        vmovdqu (1*16)(%rdx), RR1;
        vmovdqu (2*16)(%rdx), RL2;
        vmovdqu (3*16)(%rdx), RR2;
        vmovdqu (4*16)(%rdx), RL3;
        vmovdqu (5*16)(%rdx), RR3;
        vmovdqu (6*16)(%rdx), RL4;
        vmovdqu (7*16)(%rdx), RR4;

        call __cast5_dec_blk16;

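        /*
         * CBC chaining: plaintext block i needs ciphertext block i-1
         * xored in. The vmovq/vpshufd pair below builds { 0, C1 } so
         * block 2 gets C1 while block 1 is left untouched; the IV xor
         * for block 1 is assumed to be done by the C glue code that
         * calls this routine.
         */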
        /* xor with src */
        vmovq (%r12), RX;
        vpshufd $0x4f, RX, RX;
        vpxor RX, RR1, RR1;
        vpxor 0*16+8(%r12), RL1, RL1;
        vpxor 1*16+8(%r12), RR2, RR2;
        vpxor 2*16+8(%r12), RL2, RL2;
        vpxor 3*16+8(%r12), RR3, RR3;
        vpxor 4*16+8(%r12), RL3, RL3;
        vpxor 5*16+8(%r12), RR4, RR4;
        vpxor 6*16+8(%r12), RL4, RL4;

        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
        vmovdqu RL2, (3*16)(%r11);
        vmovdqu RR3, (4*16)(%r11);
        vmovdqu RL3, (5*16)(%r11);
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);

        popq %r12;

        FRAME_END
        ret;
ENDPROC(cast5_cbc_dec_16way)

ENTRY(cast5_ctr_16way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (big endian, 64bit)
         */
        FRAME_BEGIN

        pushq %r12;

        movq %rsi, %r11;
        movq %rdx, %r12;

        vpcmpeqd RTMP, RTMP, RTMP;
        vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

        vpcmpeqd RKR, RKR, RKR;
        vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
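        /*
         * Counter setup: subtracting { -1, 0 } bumps only the low
         * qword by one, and subtracting { -2, -2 } advances both
         * lanes by two, so each vpsubq below yields the next pair of
         * 64-bit counters.
         */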
        vmovdqa .Lbswap_iv_mask, R1ST;
        vmovdqa .Lbswap128_mask, RKM;

        /* load IV and byteswap */
        vmovq (%rcx), RX;
        vpshufb R1ST, RX, RX;

        /* construct IVs */
        vpsubq RTMP, RX, RX; /* le: IV1, IV0 */
        vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

        /* store last IV */
        vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
        vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
        vmovq RX, (%rcx);

        call __cast5_enc_blk16;

        /* dst = src ^ iv */
        vpxor (0*16)(%r12), RR1, RR1;
        vpxor (1*16)(%r12), RL1, RL1;
        vpxor (2*16)(%r12), RR2, RR2;
        vpxor (3*16)(%r12), RL2, RL2;
        vpxor (4*16)(%r12), RR3, RR3;
        vpxor (5*16)(%r12), RL3, RL3;
        vpxor (6*16)(%r12), RR4, RR4;
        vpxor (7*16)(%r12), RL4, RL4;
        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
        vmovdqu RL2, (3*16)(%r11);
        vmovdqu RR3, (4*16)(%r11);
        vmovdqu RL3, (5*16)(%r11);
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);

        popq %r12;

        FRAME_END
        ret;
ENDPROC(cast5_ctr_16way)