Merge tag 'mac80211-next-for-davem-2016-04-13' of git://git.kernel.org/pub/scm/linux...
[deliverable/linux.git] / drivers / crypto / talitos.c
CommitLineData
9c4a7965
KP
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
5228f0f7 4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
9c4a7965
KP
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
5af50730
RH
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
9c4a7965
KP
37#include <linux/of_platform.h>
38#include <linux/dma-mapping.h>
39#include <linux/io.h>
40#include <linux/spinlock.h>
41#include <linux/rtnetlink.h>
5a0e3ad6 42#include <linux/slab.h>
9c4a7965
KP
43
44#include <crypto/algapi.h>
45#include <crypto/aes.h>
3952f17e 46#include <crypto/des.h>
9c4a7965 47#include <crypto/sha.h>
497f2e6b 48#include <crypto/md5.h>
e98014ab 49#include <crypto/internal/aead.h>
9c4a7965 50#include <crypto/authenc.h>
4de9d0b5 51#include <crypto/skcipher.h>
acbf7c62
LN
52#include <crypto/hash.h>
53#include <crypto/internal/hash.h>
4de9d0b5 54#include <crypto/scatterwalk.h>
9c4a7965
KP
55
56#include "talitos.h"
57
922f9dc8
LC
58static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
81eb024c 60{
edc6bd69 61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
922f9dc8
LC
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
81eb024c
KP
64}
65
42e8b0d7 66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
922f9dc8 67 bool is_sec1)
538caf83 68{
922f9dc8
LC
69 if (is_sec1) {
70 ptr->res = 0;
71 ptr->len1 = cpu_to_be16(len);
72 } else {
73 ptr->len = cpu_to_be16(len);
74 }
538caf83
LC
75}
76
922f9dc8
LC
77static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
78 bool is_sec1)
538caf83 79{
922f9dc8
LC
80 if (is_sec1)
81 return be16_to_cpu(ptr->len1);
82 else
83 return be16_to_cpu(ptr->len);
538caf83
LC
84}
85
922f9dc8 86static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
185eb79f 87{
922f9dc8
LC
88 if (!is_sec1)
89 ptr->j_extent = 0;
185eb79f
LC
90}
91
9c4a7965
KP
92/*
93 * map virtual single (contiguous) pointer to h/w descriptor pointer
94 */
95static void map_single_talitos_ptr(struct device *dev,
edc6bd69 96 struct talitos_ptr *ptr,
42e8b0d7 97 unsigned int len, void *data,
9c4a7965
KP
98 enum dma_data_direction dir)
99{
81eb024c 100 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
922f9dc8
LC
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
81eb024c 103
922f9dc8
LC
104 to_talitos_ptr_len(ptr, len, is_sec1);
105 to_talitos_ptr(ptr, dma_addr, is_sec1);
106 to_talitos_ptr_extent_clear(ptr, is_sec1);
9c4a7965
KP
107}
108
109/*
110 * unmap bus single (contiguous) h/w descriptor pointer
111 */
112static void unmap_single_talitos_ptr(struct device *dev,
edc6bd69 113 struct talitos_ptr *ptr,
9c4a7965
KP
114 enum dma_data_direction dir)
115{
922f9dc8
LC
116 struct talitos_private *priv = dev_get_drvdata(dev);
117 bool is_sec1 = has_ftr_sec1(priv);
118
edc6bd69 119 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
922f9dc8 120 from_talitos_ptr_len(ptr, is_sec1), dir);
9c4a7965
KP
121}
122
123static int reset_channel(struct device *dev, int ch)
124{
125 struct talitos_private *priv = dev_get_drvdata(dev);
126 unsigned int timeout = TALITOS_TIMEOUT;
dd3c0987 127 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 128
dd3c0987
LC
129 if (is_sec1) {
130 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
131 TALITOS1_CCCR_LO_RESET);
9c4a7965 132
dd3c0987
LC
133 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
134 TALITOS1_CCCR_LO_RESET) && --timeout)
135 cpu_relax();
136 } else {
137 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
138 TALITOS2_CCCR_RESET);
139
140 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
141 TALITOS2_CCCR_RESET) && --timeout)
142 cpu_relax();
143 }
9c4a7965
KP
144
145 if (timeout == 0) {
146 dev_err(dev, "failed to reset channel %d\n", ch);
147 return -EIO;
148 }
149
81eb024c 150 /* set 36-bit addressing, done writeback enable and done IRQ enable */
ad42d5fc 151 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
81eb024c 152 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
9c4a7965 153
fe5720e2
KP
154 /* and ICCR writeback, if available */
155 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
ad42d5fc 156 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
fe5720e2
KP
157 TALITOS_CCCR_LO_IWSE);
158
9c4a7965
KP
159 return 0;
160}
161
162static int reset_device(struct device *dev)
163{
164 struct talitos_private *priv = dev_get_drvdata(dev);
165 unsigned int timeout = TALITOS_TIMEOUT;
dd3c0987
LC
166 bool is_sec1 = has_ftr_sec1(priv);
167 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
9c4a7965 168
c3e337f8 169 setbits32(priv->reg + TALITOS_MCR, mcr);
9c4a7965 170
dd3c0987 171 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
9c4a7965
KP
172 && --timeout)
173 cpu_relax();
174
2cdba3cf 175 if (priv->irq[1]) {
c3e337f8
KP
176 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
177 setbits32(priv->reg + TALITOS_MCR, mcr);
178 }
179
9c4a7965
KP
180 if (timeout == 0) {
181 dev_err(dev, "failed to reset device\n");
182 return -EIO;
183 }
184
185 return 0;
186}
187
188/*
189 * Reset and initialize the device
190 */
191static int init_device(struct device *dev)
192{
193 struct talitos_private *priv = dev_get_drvdata(dev);
194 int ch, err;
dd3c0987 195 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965
KP
196
197 /*
198 * Master reset
199 * errata documentation: warning: certain SEC interrupts
200 * are not fully cleared by writing the MCR:SWR bit,
201 * set bit twice to completely reset
202 */
203 err = reset_device(dev);
204 if (err)
205 return err;
206
207 err = reset_device(dev);
208 if (err)
209 return err;
210
211 /* reset channels */
212 for (ch = 0; ch < priv->num_channels; ch++) {
213 err = reset_channel(dev, ch);
214 if (err)
215 return err;
216 }
217
218 /* enable channel done and error interrupts */
dd3c0987
LC
219 if (is_sec1) {
220 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
221 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
222 /* disable parity error check in DEU (erroneous? test vect.) */
223 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
224 } else {
225 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
226 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
227 }
9c4a7965 228
fe5720e2
KP
229 /* disable integrity check error interrupts (use writeback instead) */
230 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
5fa7fa14 231 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
fe5720e2
KP
232 TALITOS_MDEUICR_LO_ICE);
233
9c4a7965
KP
234 return 0;
235}
236
237/**
238 * talitos_submit - submits a descriptor to the device for processing
239 * @dev: the SEC device to be used
5228f0f7 240 * @ch: the SEC device channel to be used
9c4a7965
KP
241 * @desc: the descriptor to be processed by the device
242 * @callback: whom to call when processing is complete
243 * @context: a handle for use by caller (optional)
244 *
245 * desc must contain valid dma-mapped (bus physical) address pointers.
246 * callback must check err and feedback in descriptor header
247 * for device processing status.
248 */
865d5061
HG
249int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
250 void (*callback)(struct device *dev,
251 struct talitos_desc *desc,
252 void *context, int error),
253 void *context)
9c4a7965
KP
254{
255 struct talitos_private *priv = dev_get_drvdata(dev);
256 struct talitos_request *request;
5228f0f7 257 unsigned long flags;
9c4a7965 258 int head;
7d607c6a 259 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 260
4b992628 261 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
9c4a7965 262
4b992628 263 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
ec6644d6 264 /* h/w fifo is full */
4b992628 265 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
9c4a7965
KP
266 return -EAGAIN;
267 }
268
4b992628
KP
269 head = priv->chan[ch].head;
270 request = &priv->chan[ch].fifo[head];
ec6644d6 271
9c4a7965 272 /* map descriptor and save caller data */
7d607c6a
LC
273 if (is_sec1) {
274 desc->hdr1 = desc->hdr;
275 desc->next_desc = 0;
276 request->dma_desc = dma_map_single(dev, &desc->hdr1,
277 TALITOS_DESC_SIZE,
278 DMA_BIDIRECTIONAL);
279 } else {
280 request->dma_desc = dma_map_single(dev, desc,
281 TALITOS_DESC_SIZE,
282 DMA_BIDIRECTIONAL);
283 }
9c4a7965
KP
284 request->callback = callback;
285 request->context = context;
286
287 /* increment fifo head */
4b992628 288 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
9c4a7965
KP
289
290 smp_wmb();
291 request->desc = desc;
292
293 /* GO! */
294 wmb();
ad42d5fc
KP
295 out_be32(priv->chan[ch].reg + TALITOS_FF,
296 upper_32_bits(request->dma_desc));
297 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
a752447a 298 lower_32_bits(request->dma_desc));
9c4a7965 299
4b992628 300 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
9c4a7965
KP
301
302 return -EINPROGRESS;
303}
865d5061 304EXPORT_SYMBOL(talitos_submit);
9c4a7965
KP
305
306/*
307 * process what was done, notify callback of error if not
308 */
309static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
310{
311 struct talitos_private *priv = dev_get_drvdata(dev);
312 struct talitos_request *request, saved_req;
313 unsigned long flags;
314 int tail, status;
7d607c6a 315 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 316
4b992628 317 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
9c4a7965 318
4b992628
KP
319 tail = priv->chan[ch].tail;
320 while (priv->chan[ch].fifo[tail].desc) {
7d607c6a
LC
321 __be32 hdr;
322
4b992628 323 request = &priv->chan[ch].fifo[tail];
9c4a7965
KP
324
325 /* descriptors with their done bits set don't get the error */
326 rmb();
7d607c6a
LC
327 hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
328
329 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
9c4a7965 330 status = 0;
ca38a814 331 else
9c4a7965
KP
332 if (!error)
333 break;
334 else
335 status = error;
336
337 dma_unmap_single(dev, request->dma_desc,
7d607c6a 338 TALITOS_DESC_SIZE,
e938e465 339 DMA_BIDIRECTIONAL);
9c4a7965
KP
340
341 /* copy entries so we can call callback outside lock */
342 saved_req.desc = request->desc;
343 saved_req.callback = request->callback;
344 saved_req.context = request->context;
345
346 /* release request entry in fifo */
347 smp_wmb();
348 request->desc = NULL;
349
350 /* increment fifo tail */
4b992628 351 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
9c4a7965 352
4b992628 353 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
ec6644d6 354
4b992628 355 atomic_dec(&priv->chan[ch].submit_count);
ec6644d6 356
9c4a7965
KP
357 saved_req.callback(dev, saved_req.desc, saved_req.context,
358 status);
359 /* channel may resume processing in single desc error case */
360 if (error && !reset_ch && status == error)
361 return;
4b992628
KP
362 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
363 tail = priv->chan[ch].tail;
9c4a7965
KP
364 }
365
4b992628 366 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
9c4a7965
KP
367}
368
369/*
370 * process completed requests for channels that have done status
371 */
dd3c0987
LC
372#define DEF_TALITOS1_DONE(name, ch_done_mask) \
373static void talitos1_done_##name(unsigned long data) \
374{ \
375 struct device *dev = (struct device *)data; \
376 struct talitos_private *priv = dev_get_drvdata(dev); \
377 unsigned long flags; \
378 \
379 if (ch_done_mask & 0x10000000) \
380 flush_channel(dev, 0, 0, 0); \
381 if (priv->num_channels == 1) \
382 goto out; \
383 if (ch_done_mask & 0x40000000) \
384 flush_channel(dev, 1, 0, 0); \
385 if (ch_done_mask & 0x00010000) \
386 flush_channel(dev, 2, 0, 0); \
387 if (ch_done_mask & 0x00040000) \
388 flush_channel(dev, 3, 0, 0); \
389 \
390out: \
391 /* At this point, all completed channels have been processed */ \
392 /* Unmask done interrupts for channels completed later on. */ \
393 spin_lock_irqsave(&priv->reg_lock, flags); \
394 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
395 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
396 spin_unlock_irqrestore(&priv->reg_lock, flags); \
397}
398
399DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
400
401#define DEF_TALITOS2_DONE(name, ch_done_mask) \
402static void talitos2_done_##name(unsigned long data) \
c3e337f8
KP
403{ \
404 struct device *dev = (struct device *)data; \
405 struct talitos_private *priv = dev_get_drvdata(dev); \
511d63cb 406 unsigned long flags; \
c3e337f8
KP
407 \
408 if (ch_done_mask & 1) \
409 flush_channel(dev, 0, 0, 0); \
410 if (priv->num_channels == 1) \
411 goto out; \
412 if (ch_done_mask & (1 << 2)) \
413 flush_channel(dev, 1, 0, 0); \
414 if (ch_done_mask & (1 << 4)) \
415 flush_channel(dev, 2, 0, 0); \
416 if (ch_done_mask & (1 << 6)) \
417 flush_channel(dev, 3, 0, 0); \
418 \
419out: \
420 /* At this point, all completed channels have been processed */ \
421 /* Unmask done interrupts for channels completed later on. */ \
511d63cb 422 spin_lock_irqsave(&priv->reg_lock, flags); \
c3e337f8 423 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
dd3c0987 424 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
511d63cb 425 spin_unlock_irqrestore(&priv->reg_lock, flags); \
9c4a7965 426}
dd3c0987
LC
427
428DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
429DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
430DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
9c4a7965
KP
431
432/*
433 * locate current (offending) descriptor
434 */
3e721aeb 435static u32 current_desc_hdr(struct device *dev, int ch)
9c4a7965
KP
436{
437 struct talitos_private *priv = dev_get_drvdata(dev);
b62ffd8c 438 int tail, iter;
9c4a7965
KP
439 dma_addr_t cur_desc;
440
b62ffd8c
HG
441 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
442 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
9c4a7965 443
b62ffd8c
HG
444 if (!cur_desc) {
445 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
446 return 0;
447 }
448
449 tail = priv->chan[ch].tail;
450
451 iter = tail;
452 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
453 iter = (iter + 1) & (priv->fifo_len - 1);
454 if (iter == tail) {
9c4a7965 455 dev_err(dev, "couldn't locate current descriptor\n");
3e721aeb 456 return 0;
9c4a7965
KP
457 }
458 }
459
b62ffd8c 460 return priv->chan[ch].fifo[iter].desc->hdr;
9c4a7965
KP
461}
462
463/*
464 * user diagnostics; report root cause of error based on execution unit status
465 */
3e721aeb 466static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
9c4a7965
KP
467{
468 struct talitos_private *priv = dev_get_drvdata(dev);
469 int i;
470
3e721aeb 471 if (!desc_hdr)
ad42d5fc 472 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
3e721aeb
KP
473
474 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
9c4a7965
KP
475 case DESC_HDR_SEL0_AFEU:
476 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
5fa7fa14
LC
477 in_be32(priv->reg_afeu + TALITOS_EUISR),
478 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
9c4a7965
KP
479 break;
480 case DESC_HDR_SEL0_DEU:
481 dev_err(dev, "DEUISR 0x%08x_%08x\n",
5fa7fa14
LC
482 in_be32(priv->reg_deu + TALITOS_EUISR),
483 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
9c4a7965
KP
484 break;
485 case DESC_HDR_SEL0_MDEUA:
486 case DESC_HDR_SEL0_MDEUB:
487 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
5fa7fa14
LC
488 in_be32(priv->reg_mdeu + TALITOS_EUISR),
489 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
9c4a7965
KP
490 break;
491 case DESC_HDR_SEL0_RNG:
492 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
5fa7fa14
LC
493 in_be32(priv->reg_rngu + TALITOS_ISR),
494 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
9c4a7965
KP
495 break;
496 case DESC_HDR_SEL0_PKEU:
497 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
5fa7fa14
LC
498 in_be32(priv->reg_pkeu + TALITOS_EUISR),
499 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
9c4a7965
KP
500 break;
501 case DESC_HDR_SEL0_AESU:
502 dev_err(dev, "AESUISR 0x%08x_%08x\n",
5fa7fa14
LC
503 in_be32(priv->reg_aesu + TALITOS_EUISR),
504 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
9c4a7965
KP
505 break;
506 case DESC_HDR_SEL0_CRCU:
507 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
5fa7fa14
LC
508 in_be32(priv->reg_crcu + TALITOS_EUISR),
509 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
9c4a7965
KP
510 break;
511 case DESC_HDR_SEL0_KEU:
512 dev_err(dev, "KEUISR 0x%08x_%08x\n",
5fa7fa14
LC
513 in_be32(priv->reg_pkeu + TALITOS_EUISR),
514 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
9c4a7965
KP
515 break;
516 }
517
3e721aeb 518 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
9c4a7965
KP
519 case DESC_HDR_SEL1_MDEUA:
520 case DESC_HDR_SEL1_MDEUB:
521 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
5fa7fa14
LC
522 in_be32(priv->reg_mdeu + TALITOS_EUISR),
523 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
9c4a7965
KP
524 break;
525 case DESC_HDR_SEL1_CRCU:
526 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
5fa7fa14
LC
527 in_be32(priv->reg_crcu + TALITOS_EUISR),
528 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
9c4a7965
KP
529 break;
530 }
531
532 for (i = 0; i < 8; i++)
533 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
ad42d5fc
KP
534 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
535 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
9c4a7965
KP
536}
537
538/*
539 * recover from error interrupts
540 */
5e718a09 541static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
9c4a7965 542{
9c4a7965
KP
543 struct talitos_private *priv = dev_get_drvdata(dev);
544 unsigned int timeout = TALITOS_TIMEOUT;
dd3c0987 545 int ch, error, reset_dev = 0;
42e8b0d7 546 u32 v_lo;
dd3c0987
LC
547 bool is_sec1 = has_ftr_sec1(priv);
548 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
9c4a7965
KP
549
550 for (ch = 0; ch < priv->num_channels; ch++) {
551 /* skip channels without errors */
dd3c0987
LC
552 if (is_sec1) {
553 /* bits 29, 31, 17, 19 */
554 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
555 continue;
556 } else {
557 if (!(isr & (1 << (ch * 2 + 1))))
558 continue;
559 }
9c4a7965
KP
560
561 error = -EINVAL;
562
ad42d5fc 563 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
9c4a7965
KP
564
565 if (v_lo & TALITOS_CCPSR_LO_DOF) {
566 dev_err(dev, "double fetch fifo overflow error\n");
567 error = -EAGAIN;
568 reset_ch = 1;
569 }
570 if (v_lo & TALITOS_CCPSR_LO_SOF) {
571 /* h/w dropped descriptor */
572 dev_err(dev, "single fetch fifo overflow error\n");
573 error = -EAGAIN;
574 }
575 if (v_lo & TALITOS_CCPSR_LO_MDTE)
576 dev_err(dev, "master data transfer error\n");
577 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
dd3c0987
LC
578 dev_err(dev, is_sec1 ? "pointeur not complete error\n"
579 : "s/g data length zero error\n");
9c4a7965 580 if (v_lo & TALITOS_CCPSR_LO_FPZ)
dd3c0987
LC
581 dev_err(dev, is_sec1 ? "parity error\n"
582 : "fetch pointer zero error\n");
9c4a7965
KP
583 if (v_lo & TALITOS_CCPSR_LO_IDH)
584 dev_err(dev, "illegal descriptor header error\n");
585 if (v_lo & TALITOS_CCPSR_LO_IEU)
dd3c0987
LC
586 dev_err(dev, is_sec1 ? "static assignment error\n"
587 : "invalid exec unit error\n");
9c4a7965 588 if (v_lo & TALITOS_CCPSR_LO_EU)
3e721aeb 589 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
dd3c0987
LC
590 if (!is_sec1) {
591 if (v_lo & TALITOS_CCPSR_LO_GB)
592 dev_err(dev, "gather boundary error\n");
593 if (v_lo & TALITOS_CCPSR_LO_GRL)
594 dev_err(dev, "gather return/length error\n");
595 if (v_lo & TALITOS_CCPSR_LO_SB)
596 dev_err(dev, "scatter boundary error\n");
597 if (v_lo & TALITOS_CCPSR_LO_SRL)
598 dev_err(dev, "scatter return/length error\n");
599 }
9c4a7965
KP
600
601 flush_channel(dev, ch, error, reset_ch);
602
603 if (reset_ch) {
604 reset_channel(dev, ch);
605 } else {
ad42d5fc 606 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
dd3c0987 607 TALITOS2_CCCR_CONT);
ad42d5fc
KP
608 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
609 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
dd3c0987 610 TALITOS2_CCCR_CONT) && --timeout)
9c4a7965
KP
611 cpu_relax();
612 if (timeout == 0) {
613 dev_err(dev, "failed to restart channel %d\n",
614 ch);
615 reset_dev = 1;
616 }
617 }
618 }
dd3c0987
LC
619 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
620 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
621 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
622 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
623 isr, isr_lo);
624 else
625 dev_err(dev, "done overflow, internal time out, or "
626 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
9c4a7965
KP
627
628 /* purge request queues */
629 for (ch = 0; ch < priv->num_channels; ch++)
630 flush_channel(dev, ch, -EIO, 1);
631
632 /* reset and reinitialize the device */
633 init_device(dev);
634 }
635}
636
dd3c0987
LC
637#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
638static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
639{ \
640 struct device *dev = data; \
641 struct talitos_private *priv = dev_get_drvdata(dev); \
642 u32 isr, isr_lo; \
643 unsigned long flags; \
644 \
645 spin_lock_irqsave(&priv->reg_lock, flags); \
646 isr = in_be32(priv->reg + TALITOS_ISR); \
647 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
648 /* Acknowledge interrupt */ \
649 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
650 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
651 \
652 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
653 spin_unlock_irqrestore(&priv->reg_lock, flags); \
654 talitos_error(dev, isr & ch_err_mask, isr_lo); \
655 } \
656 else { \
657 if (likely(isr & ch_done_mask)) { \
658 /* mask further done interrupts. */ \
659 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
660 /* done_task will unmask done interrupts at exit */ \
661 tasklet_schedule(&priv->done_task[tlet]); \
662 } \
663 spin_unlock_irqrestore(&priv->reg_lock, flags); \
664 } \
665 \
666 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
667 IRQ_NONE; \
668}
669
670DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
671
672#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
673static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
c3e337f8
KP
674{ \
675 struct device *dev = data; \
676 struct talitos_private *priv = dev_get_drvdata(dev); \
677 u32 isr, isr_lo; \
511d63cb 678 unsigned long flags; \
c3e337f8 679 \
511d63cb 680 spin_lock_irqsave(&priv->reg_lock, flags); \
c3e337f8
KP
681 isr = in_be32(priv->reg + TALITOS_ISR); \
682 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
683 /* Acknowledge interrupt */ \
684 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
685 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
686 \
511d63cb
HG
687 if (unlikely(isr & ch_err_mask || isr_lo)) { \
688 spin_unlock_irqrestore(&priv->reg_lock, flags); \
689 talitos_error(dev, isr & ch_err_mask, isr_lo); \
690 } \
691 else { \
c3e337f8
KP
692 if (likely(isr & ch_done_mask)) { \
693 /* mask further done interrupts. */ \
694 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
695 /* done_task will unmask done interrupts at exit */ \
696 tasklet_schedule(&priv->done_task[tlet]); \
697 } \
511d63cb
HG
698 spin_unlock_irqrestore(&priv->reg_lock, flags); \
699 } \
c3e337f8
KP
700 \
701 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
702 IRQ_NONE; \
9c4a7965 703}
dd3c0987
LC
704
705DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
706DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
707 0)
708DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
709 1)
9c4a7965
KP
710
711/*
712 * hwrng
713 */
714static int talitos_rng_data_present(struct hwrng *rng, int wait)
715{
716 struct device *dev = (struct device *)rng->priv;
717 struct talitos_private *priv = dev_get_drvdata(dev);
718 u32 ofl;
719 int i;
720
721 for (i = 0; i < 20; i++) {
5fa7fa14 722 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
9c4a7965
KP
723 TALITOS_RNGUSR_LO_OFL;
724 if (ofl || !wait)
725 break;
726 udelay(10);
727 }
728
729 return !!ofl;
730}
731
732static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
733{
734 struct device *dev = (struct device *)rng->priv;
735 struct talitos_private *priv = dev_get_drvdata(dev);
736
737 /* rng fifo requires 64-bit accesses */
5fa7fa14
LC
738 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
739 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
9c4a7965
KP
740
741 return sizeof(u32);
742}
743
744static int talitos_rng_init(struct hwrng *rng)
745{
746 struct device *dev = (struct device *)rng->priv;
747 struct talitos_private *priv = dev_get_drvdata(dev);
748 unsigned int timeout = TALITOS_TIMEOUT;
749
5fa7fa14
LC
750 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
751 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
752 & TALITOS_RNGUSR_LO_RD)
9c4a7965
KP
753 && --timeout)
754 cpu_relax();
755 if (timeout == 0) {
756 dev_err(dev, "failed to reset rng hw\n");
757 return -ENODEV;
758 }
759
760 /* start generating */
5fa7fa14 761 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
9c4a7965
KP
762
763 return 0;
764}
765
766static int talitos_register_rng(struct device *dev)
767{
768 struct talitos_private *priv = dev_get_drvdata(dev);
35a3bb3d 769 int err;
9c4a7965
KP
770
771 priv->rng.name = dev_driver_string(dev),
772 priv->rng.init = talitos_rng_init,
773 priv->rng.data_present = talitos_rng_data_present,
774 priv->rng.data_read = talitos_rng_data_read,
775 priv->rng.priv = (unsigned long)dev;
776
35a3bb3d
AS
777 err = hwrng_register(&priv->rng);
778 if (!err)
779 priv->rng_registered = true;
780
781 return err;
9c4a7965
KP
782}
783
784static void talitos_unregister_rng(struct device *dev)
785{
786 struct talitos_private *priv = dev_get_drvdata(dev);
787
35a3bb3d
AS
788 if (!priv->rng_registered)
789 return;
790
9c4a7965 791 hwrng_unregister(&priv->rng);
35a3bb3d 792 priv->rng_registered = false;
9c4a7965
KP
793}
794
795/*
796 * crypto alg
797 */
798#define TALITOS_CRA_PRIORITY 3000
357fb605 799#define TALITOS_MAX_KEY_SIZE 96
3952f17e 800#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
70bcaca7 801
9c4a7965
KP
802struct talitos_ctx {
803 struct device *dev;
5228f0f7 804 int ch;
9c4a7965
KP
805 __be32 desc_hdr_template;
806 u8 key[TALITOS_MAX_KEY_SIZE];
70bcaca7 807 u8 iv[TALITOS_MAX_IV_LENGTH];
9c4a7965
KP
808 unsigned int keylen;
809 unsigned int enckeylen;
810 unsigned int authkeylen;
9c4a7965
KP
811};
812
497f2e6b
LN
813#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
814#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
815
816struct talitos_ahash_req_ctx {
60f208d7 817 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
497f2e6b
LN
818 unsigned int hw_context_size;
819 u8 buf[HASH_MAX_BLOCK_SIZE];
820 u8 bufnext[HASH_MAX_BLOCK_SIZE];
60f208d7 821 unsigned int swinit;
497f2e6b
LN
822 unsigned int first;
823 unsigned int last;
824 unsigned int to_hash_later;
42e8b0d7 825 unsigned int nbuf;
497f2e6b
LN
826 struct scatterlist bufsl[2];
827 struct scatterlist *psrc;
828};
829
56af8cd4
LN
830static int aead_setkey(struct crypto_aead *authenc,
831 const u8 *key, unsigned int keylen)
9c4a7965
KP
832{
833 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
c306a98d 834 struct crypto_authenc_keys keys;
9c4a7965 835
c306a98d 836 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
9c4a7965
KP
837 goto badkey;
838
c306a98d 839 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
9c4a7965
KP
840 goto badkey;
841
c306a98d
MK
842 memcpy(ctx->key, keys.authkey, keys.authkeylen);
843 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
9c4a7965 844
c306a98d
MK
845 ctx->keylen = keys.authkeylen + keys.enckeylen;
846 ctx->enckeylen = keys.enckeylen;
847 ctx->authkeylen = keys.authkeylen;
9c4a7965
KP
848
849 return 0;
850
851badkey:
852 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
853 return -EINVAL;
854}
855
856/*
56af8cd4 857 * talitos_edesc - s/w-extended descriptor
9c4a7965
KP
858 * @src_nents: number of segments in input scatterlist
859 * @dst_nents: number of segments in output scatterlist
aeb4c132 860 * @icv_ool: whether ICV is out-of-line
79fd31d3 861 * @iv_dma: dma address of iv for checking continuity and link table
9c4a7965 862 * @dma_len: length of dma mapped link_tbl space
6f65f6ac 863 * @dma_link_tbl: bus physical address of link_tbl/buf
9c4a7965 864 * @desc: h/w descriptor
6f65f6ac
LC
865 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
866 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
9c4a7965
KP
867 *
868 * if decrypting (with authcheck), or either one of src_nents or dst_nents
869 * is greater than 1, an integrity check value is concatenated to the end
870 * of link_tbl data
871 */
56af8cd4 872struct talitos_edesc {
9c4a7965
KP
873 int src_nents;
874 int dst_nents;
aeb4c132 875 bool icv_ool;
79fd31d3 876 dma_addr_t iv_dma;
9c4a7965
KP
877 int dma_len;
878 dma_addr_t dma_link_tbl;
879 struct talitos_desc desc;
6f65f6ac
LC
880 union {
881 struct talitos_ptr link_tbl[0];
882 u8 buf[0];
883 };
9c4a7965
KP
884};
885
4de9d0b5
LN
886static void talitos_sg_unmap(struct device *dev,
887 struct talitos_edesc *edesc,
888 struct scatterlist *src,
889 struct scatterlist *dst)
890{
891 unsigned int src_nents = edesc->src_nents ? : 1;
892 unsigned int dst_nents = edesc->dst_nents ? : 1;
893
894 if (src != dst) {
b8a011d4 895 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
4de9d0b5 896
497f2e6b 897 if (dst) {
b8a011d4 898 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
497f2e6b 899 }
4de9d0b5 900 } else
b8a011d4 901 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
4de9d0b5
LN
902}
903
9c4a7965 904static void ipsec_esp_unmap(struct device *dev,
56af8cd4 905 struct talitos_edesc *edesc,
9c4a7965
KP
906 struct aead_request *areq)
907{
908 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
909 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
910 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
911 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
912
4de9d0b5 913 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
9c4a7965
KP
914
915 if (edesc->dma_len)
916 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
917 DMA_BIDIRECTIONAL);
918}
919
920/*
921 * ipsec_esp descriptor callbacks
922 */
923static void ipsec_esp_encrypt_done(struct device *dev,
924 struct talitos_desc *desc, void *context,
925 int err)
926{
927 struct aead_request *areq = context;
9c4a7965 928 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
aeb4c132 929 unsigned int authsize = crypto_aead_authsize(authenc);
19bbbc63 930 struct talitos_edesc *edesc;
9c4a7965
KP
931 struct scatterlist *sg;
932 void *icvdata;
933
19bbbc63
KP
934 edesc = container_of(desc, struct talitos_edesc, desc);
935
9c4a7965
KP
936 ipsec_esp_unmap(dev, edesc, areq);
937
938 /* copy the generated ICV to dst */
aeb4c132 939 if (edesc->icv_ool) {
9c4a7965 940 icvdata = &edesc->link_tbl[edesc->src_nents +
aeb4c132 941 edesc->dst_nents + 2];
9c4a7965 942 sg = sg_last(areq->dst, edesc->dst_nents);
aeb4c132
HX
943 memcpy((char *)sg_virt(sg) + sg->length - authsize,
944 icvdata, authsize);
9c4a7965
KP
945 }
946
947 kfree(edesc);
948
949 aead_request_complete(areq, err);
950}
951
fe5720e2 952static void ipsec_esp_decrypt_swauth_done(struct device *dev,
e938e465
KP
953 struct talitos_desc *desc,
954 void *context, int err)
9c4a7965
KP
955{
956 struct aead_request *req = context;
9c4a7965 957 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
aeb4c132 958 unsigned int authsize = crypto_aead_authsize(authenc);
19bbbc63 959 struct talitos_edesc *edesc;
9c4a7965 960 struct scatterlist *sg;
aeb4c132 961 char *oicv, *icv;
9c4a7965 962
19bbbc63
KP
963 edesc = container_of(desc, struct talitos_edesc, desc);
964
9c4a7965
KP
965 ipsec_esp_unmap(dev, edesc, req);
966
967 if (!err) {
968 /* auth check */
9c4a7965 969 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
aeb4c132
HX
970 icv = (char *)sg_virt(sg) + sg->length - authsize;
971
972 if (edesc->dma_len) {
973 oicv = (char *)&edesc->link_tbl[edesc->src_nents +
974 edesc->dst_nents + 2];
975 if (edesc->icv_ool)
976 icv = oicv + authsize;
977 } else
978 oicv = (char *)&edesc->link_tbl[0];
979
79960943 980 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
9c4a7965
KP
981 }
982
983 kfree(edesc);
984
985 aead_request_complete(req, err);
986}
987
fe5720e2 988static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
e938e465
KP
989 struct talitos_desc *desc,
990 void *context, int err)
fe5720e2
KP
991{
992 struct aead_request *req = context;
19bbbc63
KP
993 struct talitos_edesc *edesc;
994
995 edesc = container_of(desc, struct talitos_edesc, desc);
fe5720e2
KP
996
997 ipsec_esp_unmap(dev, edesc, req);
998
999 /* check ICV auth status */
e938e465
KP
1000 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1001 DESC_HDR_LO_ICCR1_PASS))
1002 err = -EBADMSG;
fe5720e2
KP
1003
1004 kfree(edesc);
1005
1006 aead_request_complete(req, err);
1007}
1008
9c4a7965
KP
1009/*
1010 * convert scatterlist to SEC h/w link table format
1011 * stop at cryptlen bytes
1012 */
aeb4c132
HX
1013static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1014 unsigned int offset, int cryptlen,
1015 struct talitos_ptr *link_tbl_ptr)
9c4a7965 1016{
70bcaca7 1017 int n_sg = sg_count;
aeb4c132 1018 int count = 0;
70bcaca7 1019
aeb4c132
HX
1020 while (cryptlen && sg && n_sg--) {
1021 unsigned int len = sg_dma_len(sg);
9c4a7965 1022
aeb4c132
HX
1023 if (offset >= len) {
1024 offset -= len;
1025 goto next;
1026 }
1027
1028 len -= offset;
1029
1030 if (len > cryptlen)
1031 len = cryptlen;
1032
1033 to_talitos_ptr(link_tbl_ptr + count,
1034 sg_dma_address(sg) + offset, 0);
1035 link_tbl_ptr[count].len = cpu_to_be16(len);
1036 link_tbl_ptr[count].j_extent = 0;
1037 count++;
1038 cryptlen -= len;
1039 offset = 0;
1040
1041next:
1042 sg = sg_next(sg);
70bcaca7 1043 }
9c4a7965
KP
1044
1045 /* tag end of link table */
aeb4c132
HX
1046 if (count > 0)
1047 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
70bcaca7 1048
aeb4c132
HX
1049 return count;
1050}
1051
1052static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
1053 int cryptlen,
1054 struct talitos_ptr *link_tbl_ptr)
1055{
1056 return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
1057 link_tbl_ptr);
9c4a7965
KP
1058}
1059
1060/*
1061 * fill in and submit ipsec_esp descriptor
1062 */
56af8cd4 1063static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
aeb4c132
HX
1064 void (*callback)(struct device *dev,
1065 struct talitos_desc *desc,
1066 void *context, int error))
9c4a7965
KP
1067{
1068 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
aeb4c132 1069 unsigned int authsize = crypto_aead_authsize(aead);
9c4a7965
KP
1070 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1071 struct device *dev = ctx->dev;
1072 struct talitos_desc *desc = &edesc->desc;
1073 unsigned int cryptlen = areq->cryptlen;
e41256f1 1074 unsigned int ivsize = crypto_aead_ivsize(aead);
aeb4c132 1075 int tbl_off = 0;
fa86a267 1076 int sg_count, ret;
fe5720e2 1077 int sg_link_tbl_len;
9c4a7965
KP
1078
1079 /* hmac key */
1080 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
a2b35aa8 1081 DMA_TO_DEVICE);
79fd31d3 1082
b8a011d4
LC
1083 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1084 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1085 : DMA_TO_DEVICE);
aeb4c132 1086
9c4a7965 1087 /* hmac data */
aeb4c132
HX
1088 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1089 if (sg_count > 1 &&
1090 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1091 areq->assoclen,
1092 &edesc->link_tbl[tbl_off])) > 1) {
1093 tbl_off += ret;
79fd31d3
HG
1094
1095 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
922f9dc8 1096 sizeof(struct talitos_ptr), 0);
79fd31d3
HG
1097 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1098
79fd31d3
HG
1099 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1100 edesc->dma_len, DMA_BIDIRECTIONAL);
1101 } else {
aeb4c132 1102 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
79fd31d3
HG
1103 desc->ptr[1].j_extent = 0;
1104 }
1105
9c4a7965 1106 /* cipher iv */
922f9dc8 1107 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
79fd31d3
HG
1108 desc->ptr[2].len = cpu_to_be16(ivsize);
1109 desc->ptr[2].j_extent = 0;
9c4a7965
KP
1110
1111 /* cipher key */
1112 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
a2b35aa8 1113 (char *)&ctx->key + ctx->authkeylen,
9c4a7965
KP
1114 DMA_TO_DEVICE);
1115
1116 /*
1117 * cipher in
1118 * map and adjust cipher len to aead request cryptlen.
1119 * extent is bytes of HMAC postpended to ciphertext,
1120 * typically 12 for ipsec
1121 */
1122 desc->ptr[4].len = cpu_to_be16(cryptlen);
1123 desc->ptr[4].j_extent = authsize;
1124
aeb4c132
HX
1125 sg_link_tbl_len = cryptlen;
1126 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1127 sg_link_tbl_len += authsize;
1128
1129 if (sg_count > 1 &&
1130 (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
1131 sg_link_tbl_len,
1132 &edesc->link_tbl[tbl_off])) > 1) {
1133 tbl_off += ret;
1134 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1135 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1136 tbl_off *
1137 sizeof(struct talitos_ptr), 0);
1138 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1139 edesc->dma_len,
1140 DMA_BIDIRECTIONAL);
1141 } else
922f9dc8 1142 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
9c4a7965
KP
1143
1144 /* cipher out */
1145 desc->ptr[5].len = cpu_to_be16(cryptlen);
1146 desc->ptr[5].j_extent = authsize;
1147
e938e465 1148 if (areq->src != areq->dst)
b8a011d4
LC
1149 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
1150 DMA_FROM_DEVICE);
9c4a7965 1151
aeb4c132
HX
1152 edesc->icv_ool = false;
1153
1154 if (sg_count > 1 &&
1155 (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
1156 areq->assoclen, cryptlen,
1157 &edesc->link_tbl[tbl_off])) >
1158 1) {
79fd31d3 1159 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
9c4a7965 1160
81eb024c 1161 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
922f9dc8 1162 tbl_off * sizeof(struct talitos_ptr), 0);
fe5720e2 1163
f3c85bc1 1164 /* Add an entry to the link table for ICV data */
79fd31d3
HG
1165 tbl_ptr += sg_count - 1;
1166 tbl_ptr->j_extent = 0;
1167 tbl_ptr++;
1168 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1169 tbl_ptr->len = cpu_to_be16(authsize);
9c4a7965
KP
1170
1171 /* icv data follows link tables */
79fd31d3 1172 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
aeb4c132
HX
1173 (edesc->src_nents + edesc->dst_nents +
1174 2) * sizeof(struct talitos_ptr) +
1175 authsize, 0);
9c4a7965
KP
1176 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1177 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1178 edesc->dma_len, DMA_BIDIRECTIONAL);
aeb4c132
HX
1179
1180 edesc->icv_ool = true;
1181 } else
1182 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
9c4a7965
KP
1183
1184 /* iv out */
a2b35aa8 1185 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
9c4a7965
KP
1186 DMA_FROM_DEVICE);
1187
5228f0f7 1188 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
fa86a267
KP
1189 if (ret != -EINPROGRESS) {
1190 ipsec_esp_unmap(dev, edesc, areq);
1191 kfree(edesc);
1192 }
1193 return ret;
9c4a7965
KP
1194}
1195
9c4a7965 1196/*
56af8cd4 1197 * allocate and map the extended descriptor
9c4a7965 1198 */
4de9d0b5
LN
1199static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1200 struct scatterlist *src,
1201 struct scatterlist *dst,
79fd31d3
HG
1202 u8 *iv,
1203 unsigned int assoclen,
4de9d0b5
LN
1204 unsigned int cryptlen,
1205 unsigned int authsize,
79fd31d3 1206 unsigned int ivsize,
4de9d0b5 1207 int icv_stashing,
62293a37
HG
1208 u32 cryptoflags,
1209 bool encrypt)
9c4a7965 1210{
56af8cd4 1211 struct talitos_edesc *edesc;
aeb4c132 1212 int src_nents, dst_nents, alloc_len, dma_len;
79fd31d3 1213 dma_addr_t iv_dma = 0;
4de9d0b5 1214 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
586725f8 1215 GFP_ATOMIC;
6f65f6ac
LC
1216 struct talitos_private *priv = dev_get_drvdata(dev);
1217 bool is_sec1 = has_ftr_sec1(priv);
1218 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
8e409fe1 1219 void *err;
9c4a7965 1220
6f65f6ac 1221 if (cryptlen + authsize > max_len) {
4de9d0b5 1222 dev_err(dev, "length exceeds h/w max limit\n");
9c4a7965
KP
1223 return ERR_PTR(-EINVAL);
1224 }
1225
935e99a3 1226 if (ivsize)
79fd31d3
HG
1227 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1228
62293a37 1229 if (!dst || dst == src) {
b8a011d4
LC
1230 src_nents = sg_nents_for_len(src,
1231 assoclen + cryptlen + authsize);
8e409fe1
LC
1232 if (src_nents < 0) {
1233 dev_err(dev, "Invalid number of src SG.\n");
1234 err = ERR_PTR(-EINVAL);
1235 goto error_sg;
1236 }
62293a37
HG
1237 src_nents = (src_nents == 1) ? 0 : src_nents;
1238 dst_nents = dst ? src_nents : 0;
1239 } else { /* dst && dst != src*/
b8a011d4
LC
1240 src_nents = sg_nents_for_len(src, assoclen + cryptlen +
1241 (encrypt ? 0 : authsize));
8e409fe1
LC
1242 if (src_nents < 0) {
1243 dev_err(dev, "Invalid number of src SG.\n");
1244 err = ERR_PTR(-EINVAL);
1245 goto error_sg;
1246 }
62293a37 1247 src_nents = (src_nents == 1) ? 0 : src_nents;
b8a011d4
LC
1248 dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
1249 (encrypt ? authsize : 0));
8e409fe1
LC
1250 if (dst_nents < 0) {
1251 dev_err(dev, "Invalid number of dst SG.\n");
1252 err = ERR_PTR(-EINVAL);
1253 goto error_sg;
1254 }
62293a37 1255 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
9c4a7965
KP
1256 }
1257
1258 /*
1259 * allocate space for base edesc plus the link tables,
aeb4c132
HX
1260 * allowing for two separate entries for AD and generated ICV (+ 2),
1261 * and space for two sets of ICVs (stashed and generated)
9c4a7965 1262 */
56af8cd4 1263 alloc_len = sizeof(struct talitos_edesc);
aeb4c132 1264 if (src_nents || dst_nents) {
6f65f6ac 1265 if (is_sec1)
608f37d0
DC
1266 dma_len = (src_nents ? cryptlen : 0) +
1267 (dst_nents ? cryptlen : 0);
6f65f6ac 1268 else
aeb4c132
HX
1269 dma_len = (src_nents + dst_nents + 2) *
1270 sizeof(struct talitos_ptr) + authsize * 2;
9c4a7965
KP
1271 alloc_len += dma_len;
1272 } else {
1273 dma_len = 0;
4de9d0b5 1274 alloc_len += icv_stashing ? authsize : 0;
9c4a7965
KP
1275 }
1276
586725f8 1277 edesc = kmalloc(alloc_len, GFP_DMA | flags);
9c4a7965 1278 if (!edesc) {
4de9d0b5 1279 dev_err(dev, "could not allocate edescriptor\n");
8e409fe1
LC
1280 err = ERR_PTR(-ENOMEM);
1281 goto error_sg;
9c4a7965
KP
1282 }
1283
1284 edesc->src_nents = src_nents;
1285 edesc->dst_nents = dst_nents;
79fd31d3 1286 edesc->iv_dma = iv_dma;
9c4a7965 1287 edesc->dma_len = dma_len;
497f2e6b
LN
1288 if (dma_len)
1289 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1290 edesc->dma_len,
1291 DMA_BIDIRECTIONAL);
9c4a7965
KP
1292
1293 return edesc;
8e409fe1
LC
1294error_sg:
1295 if (iv_dma)
1296 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1297 return err;
9c4a7965
KP
1298}
1299
79fd31d3 1300static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
62293a37 1301 int icv_stashing, bool encrypt)
4de9d0b5
LN
1302{
1303 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
aeb4c132 1304 unsigned int authsize = crypto_aead_authsize(authenc);
4de9d0b5 1305 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
79fd31d3 1306 unsigned int ivsize = crypto_aead_ivsize(authenc);
4de9d0b5 1307
aeb4c132 1308 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1309 iv, areq->assoclen, areq->cryptlen,
aeb4c132 1310 authsize, ivsize, icv_stashing,
62293a37 1311 areq->base.flags, encrypt);
4de9d0b5
LN
1312}
1313
56af8cd4 1314static int aead_encrypt(struct aead_request *req)
9c4a7965
KP
1315{
1316 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1317 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1318 struct talitos_edesc *edesc;
9c4a7965
KP
1319
1320 /* allocate extended descriptor */
62293a37 1321 edesc = aead_edesc_alloc(req, req->iv, 0, true);
9c4a7965
KP
1322 if (IS_ERR(edesc))
1323 return PTR_ERR(edesc);
1324
1325 /* set encrypt */
70bcaca7 1326 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965 1327
aeb4c132 1328 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
9c4a7965
KP
1329}
1330
56af8cd4 1331static int aead_decrypt(struct aead_request *req)
9c4a7965
KP
1332{
1333 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
aeb4c132 1334 unsigned int authsize = crypto_aead_authsize(authenc);
9c4a7965 1335 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
fe5720e2 1336 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
56af8cd4 1337 struct talitos_edesc *edesc;
9c4a7965
KP
1338 struct scatterlist *sg;
1339 void *icvdata;
1340
1341 req->cryptlen -= authsize;
1342
1343 /* allocate extended descriptor */
62293a37 1344 edesc = aead_edesc_alloc(req, req->iv, 1, false);
9c4a7965
KP
1345 if (IS_ERR(edesc))
1346 return PTR_ERR(edesc);
1347
fe5720e2 1348 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
e938e465
KP
1349 ((!edesc->src_nents && !edesc->dst_nents) ||
1350 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
9c4a7965 1351
fe5720e2 1352 /* decrypt and check the ICV */
e938e465
KP
1353 edesc->desc.hdr = ctx->desc_hdr_template |
1354 DESC_HDR_DIR_INBOUND |
fe5720e2 1355 DESC_HDR_MODE1_MDEU_CICV;
9c4a7965 1356
fe5720e2
KP
1357 /* reset integrity check result bits */
1358 edesc->desc.hdr_lo = 0;
9c4a7965 1359
aeb4c132 1360 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
e938e465 1361 }
fe5720e2 1362
e938e465
KP
1363 /* Have to check the ICV with software */
1364 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
fe5720e2 1365
e938e465
KP
1366 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1367 if (edesc->dma_len)
aeb4c132
HX
1368 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1369 edesc->dst_nents + 2];
e938e465
KP
1370 else
1371 icvdata = &edesc->link_tbl[0];
fe5720e2 1372
e938e465 1373 sg = sg_last(req->src, edesc->src_nents ? : 1);
fe5720e2 1374
aeb4c132 1375 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
9c4a7965 1376
aeb4c132 1377 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
9c4a7965
KP
1378}
1379
4de9d0b5
LN
1380static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1381 const u8 *key, unsigned int keylen)
1382{
1383 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
4de9d0b5
LN
1384
1385 memcpy(&ctx->key, key, keylen);
1386 ctx->keylen = keylen;
1387
1388 return 0;
4de9d0b5
LN
1389}
1390
032d197e
LC
1391static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1392 struct scatterlist *dst, unsigned int len,
1393 struct talitos_edesc *edesc)
1394{
6f65f6ac
LC
1395 struct talitos_private *priv = dev_get_drvdata(dev);
1396 bool is_sec1 = has_ftr_sec1(priv);
1397
1398 if (is_sec1) {
1399 if (!edesc->src_nents) {
1400 dma_unmap_sg(dev, src, 1,
1401 dst != src ? DMA_TO_DEVICE
1402 : DMA_BIDIRECTIONAL);
1403 }
1404 if (dst && edesc->dst_nents) {
1405 dma_sync_single_for_device(dev,
1406 edesc->dma_link_tbl + len,
1407 len, DMA_FROM_DEVICE);
1408 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1409 edesc->buf + len, len);
1410 } else if (dst && dst != src) {
1411 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1412 }
1413 } else {
1414 talitos_sg_unmap(dev, edesc, src, dst);
1415 }
032d197e
LC
1416}
1417
4de9d0b5
LN
1418static void common_nonsnoop_unmap(struct device *dev,
1419 struct talitos_edesc *edesc,
1420 struct ablkcipher_request *areq)
1421{
1422 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
032d197e
LC
1423
1424 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
4de9d0b5
LN
1425 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1426 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1427
4de9d0b5
LN
1428 if (edesc->dma_len)
1429 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1430 DMA_BIDIRECTIONAL);
1431}
1432
1433static void ablkcipher_done(struct device *dev,
1434 struct talitos_desc *desc, void *context,
1435 int err)
1436{
1437 struct ablkcipher_request *areq = context;
19bbbc63
KP
1438 struct talitos_edesc *edesc;
1439
1440 edesc = container_of(desc, struct talitos_edesc, desc);
4de9d0b5
LN
1441
1442 common_nonsnoop_unmap(dev, edesc, areq);
1443
1444 kfree(edesc);
1445
1446 areq->base.complete(&areq->base, err);
1447}
1448
032d197e
LC
1449int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1450 unsigned int len, struct talitos_edesc *edesc,
1451 enum dma_data_direction dir, struct talitos_ptr *ptr)
1452{
1453 int sg_count;
922f9dc8
LC
1454 struct talitos_private *priv = dev_get_drvdata(dev);
1455 bool is_sec1 = has_ftr_sec1(priv);
032d197e 1456
922f9dc8 1457 to_talitos_ptr_len(ptr, len, is_sec1);
032d197e 1458
6f65f6ac
LC
1459 if (is_sec1) {
1460 sg_count = edesc->src_nents ? : 1;
032d197e 1461
6f65f6ac
LC
1462 if (sg_count == 1) {
1463 dma_map_sg(dev, src, 1, dir);
1464 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
032d197e 1465 } else {
6f65f6ac
LC
1466 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1467 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1468 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1469 len, DMA_TO_DEVICE);
1470 }
1471 } else {
1472 to_talitos_ptr_extent_clear(ptr, is_sec1);
1473
b8a011d4 1474 sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
6f65f6ac
LC
1475
1476 if (sg_count == 1) {
922f9dc8 1477 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
6f65f6ac
LC
1478 } else {
1479 sg_count = sg_to_link_tbl(src, sg_count, len,
1480 &edesc->link_tbl[0]);
1481 if (sg_count > 1) {
1482 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1483 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1484 dma_sync_single_for_device(dev,
1485 edesc->dma_link_tbl,
1486 edesc->dma_len,
1487 DMA_BIDIRECTIONAL);
1488 } else {
1489 /* Only one segment now, so no link tbl needed*/
1490 to_talitos_ptr(ptr, sg_dma_address(src),
1491 is_sec1);
1492 }
032d197e
LC
1493 }
1494 }
1495 return sg_count;
1496}
1497
1498void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1499 unsigned int len, struct talitos_edesc *edesc,
1500 enum dma_data_direction dir,
1501 struct talitos_ptr *ptr, int sg_count)
1502{
922f9dc8
LC
1503 struct talitos_private *priv = dev_get_drvdata(dev);
1504 bool is_sec1 = has_ftr_sec1(priv);
1505
032d197e 1506 if (dir != DMA_NONE)
b8a011d4 1507 sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
032d197e 1508
6f65f6ac
LC
1509 to_talitos_ptr_len(ptr, len, is_sec1);
1510
1511 if (is_sec1) {
1512 if (sg_count == 1) {
1513 if (dir != DMA_NONE)
1514 dma_map_sg(dev, dst, 1, dir);
1515 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1516 } else {
1517 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1518 dma_sync_single_for_device(dev,
1519 edesc->dma_link_tbl + len,
1520 len, DMA_FROM_DEVICE);
1521 }
032d197e 1522 } else {
6f65f6ac
LC
1523 to_talitos_ptr_extent_clear(ptr, is_sec1);
1524
1525 if (sg_count == 1) {
1526 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1527 } else {
1528 struct talitos_ptr *link_tbl_ptr =
1529 &edesc->link_tbl[edesc->src_nents + 1];
1530
1531 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1532 (edesc->src_nents + 1) *
1533 sizeof(struct talitos_ptr), 0);
1534 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
42e8b0d7 1535 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
6f65f6ac
LC
1536 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1537 edesc->dma_len,
1538 DMA_BIDIRECTIONAL);
1539 }
032d197e
LC
1540 }
1541}
1542
4de9d0b5
LN
1543static int common_nonsnoop(struct talitos_edesc *edesc,
1544 struct ablkcipher_request *areq,
4de9d0b5
LN
1545 void (*callback) (struct device *dev,
1546 struct talitos_desc *desc,
1547 void *context, int error))
1548{
1549 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1550 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1551 struct device *dev = ctx->dev;
1552 struct talitos_desc *desc = &edesc->desc;
1553 unsigned int cryptlen = areq->nbytes;
79fd31d3 1554 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1555 int sg_count, ret;
922f9dc8
LC
1556 struct talitos_private *priv = dev_get_drvdata(dev);
1557 bool is_sec1 = has_ftr_sec1(priv);
4de9d0b5
LN
1558
1559 /* first DWORD empty */
2529bc37 1560 desc->ptr[0] = zero_entry;
4de9d0b5
LN
1561
1562 /* cipher iv */
922f9dc8
LC
1563 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1564 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1565 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
4de9d0b5
LN
1566
1567 /* cipher key */
1568 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1569 (char *)&ctx->key, DMA_TO_DEVICE);
4de9d0b5
LN
1570
1571 /*
1572 * cipher in
1573 */
032d197e
LC
1574 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1575 (areq->src == areq->dst) ?
1576 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1577 &desc->ptr[3]);
4de9d0b5
LN
1578
1579 /* cipher out */
032d197e
LC
1580 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1581 (areq->src == areq->dst) ? DMA_NONE
1582 : DMA_FROM_DEVICE,
1583 &desc->ptr[4], sg_count);
4de9d0b5
LN
1584
1585 /* iv out */
a2b35aa8 1586 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
4de9d0b5
LN
1587 DMA_FROM_DEVICE);
1588
1589 /* last DWORD empty */
2529bc37 1590 desc->ptr[6] = zero_entry;
4de9d0b5 1591
5228f0f7 1592 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
4de9d0b5
LN
1593 if (ret != -EINPROGRESS) {
1594 common_nonsnoop_unmap(dev, edesc, areq);
1595 kfree(edesc);
1596 }
1597 return ret;
1598}
1599
e938e465 1600static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
62293a37 1601 areq, bool encrypt)
4de9d0b5
LN
1602{
1603 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1604 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
79fd31d3 1605 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1606
aeb4c132 1607 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1608 areq->info, 0, areq->nbytes, 0, ivsize, 0,
62293a37 1609 areq->base.flags, encrypt);
4de9d0b5
LN
1610}
1611
1612static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1613{
1614 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1615 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1616 struct talitos_edesc *edesc;
1617
1618 /* allocate extended descriptor */
62293a37 1619 edesc = ablkcipher_edesc_alloc(areq, true);
4de9d0b5
LN
1620 if (IS_ERR(edesc))
1621 return PTR_ERR(edesc);
1622
1623 /* set encrypt */
1624 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1625
febec542 1626 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1627}
1628
1629static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1630{
1631 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1632 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1633 struct talitos_edesc *edesc;
1634
1635 /* allocate extended descriptor */
62293a37 1636 edesc = ablkcipher_edesc_alloc(areq, false);
4de9d0b5
LN
1637 if (IS_ERR(edesc))
1638 return PTR_ERR(edesc);
1639
1640 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1641
febec542 1642 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1643}
1644
497f2e6b
LN
1645static void common_nonsnoop_hash_unmap(struct device *dev,
1646 struct talitos_edesc *edesc,
1647 struct ahash_request *areq)
1648{
1649 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
922f9dc8
LC
1650 struct talitos_private *priv = dev_get_drvdata(dev);
1651 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1652
1653 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1654
032d197e
LC
1655 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1656
497f2e6b 1657 /* When using hashctx-in, must unmap it. */
922f9dc8 1658 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
497f2e6b
LN
1659 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1660 DMA_TO_DEVICE);
1661
922f9dc8 1662 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
497f2e6b
LN
1663 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1664 DMA_TO_DEVICE);
1665
497f2e6b
LN
1666 if (edesc->dma_len)
1667 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1668 DMA_BIDIRECTIONAL);
1669
1670}
1671
1672static void ahash_done(struct device *dev,
1673 struct talitos_desc *desc, void *context,
1674 int err)
1675{
1676 struct ahash_request *areq = context;
1677 struct talitos_edesc *edesc =
1678 container_of(desc, struct talitos_edesc, desc);
1679 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1680
1681 if (!req_ctx->last && req_ctx->to_hash_later) {
1682 /* Position any partial block for next update/final/finup */
1683 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
5e833bc4 1684 req_ctx->nbuf = req_ctx->to_hash_later;
497f2e6b
LN
1685 }
1686 common_nonsnoop_hash_unmap(dev, edesc, areq);
1687
1688 kfree(edesc);
1689
1690 areq->base.complete(&areq->base, err);
1691}
1692
2d02905e
LC
1693/*
1694 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1695 * ourselves and submit a padded block
1696 */
1697void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1698 struct talitos_edesc *edesc,
1699 struct talitos_ptr *ptr)
1700{
1701 static u8 padded_hash[64] = {
1702 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1703 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1704 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1705 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1706 };
1707
1708 pr_err_once("Bug in SEC1, padding ourself\n");
1709 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1710 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1711 (char *)padded_hash, DMA_TO_DEVICE);
1712}
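/*
 * Background on the fixed block above (illustrative, not driver logic):
 * for the 64-byte-block digests a SEC1 offloads (MD5, SHA-1,
 * SHA-224/256), the padding of a zero-length message is exactly one
 * block - 0x80 followed by 63 zero bytes, the last eight being the
 * all-zero bit-length field.  Submitting that block with
 * DESC_HDR_MODE0_MDEU_PAD cleared therefore yields the digest of the
 * empty message.  Built by hand it would simply be:
 *
 *	u8 block[64] = { 0x80 };	(remaining bytes default to zero)
 *
 * This only holds for 64-byte block sizes; SHA-384/512 would need a
 * 128-byte block with a 16-byte length field.
 */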
1713
497f2e6b
LN
1714static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1715 struct ahash_request *areq, unsigned int length,
1716 void (*callback) (struct device *dev,
1717 struct talitos_desc *desc,
1718 void *context, int error))
1719{
1720 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1721 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1722 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1723 struct device *dev = ctx->dev;
1724 struct talitos_desc *desc = &edesc->desc;
032d197e 1725 int ret;
922f9dc8
LC
1726 struct talitos_private *priv = dev_get_drvdata(dev);
1727 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1728
1729 /* first DWORD empty */
1730 desc->ptr[0] = zero_entry;
1731
60f208d7
KP
1732 /* hash context in */
1733 if (!req_ctx->first || req_ctx->swinit) {
497f2e6b
LN
1734 map_single_talitos_ptr(dev, &desc->ptr[1],
1735 req_ctx->hw_context_size,
a2b35aa8 1736 (char *)req_ctx->hw_context,
497f2e6b 1737 DMA_TO_DEVICE);
60f208d7 1738 req_ctx->swinit = 0;
497f2e6b
LN
1739 } else {
1740 desc->ptr[1] = zero_entry;
1741 /* Indicate next op is not the first. */
1742 req_ctx->first = 0;
1743 }
1744
1745 /* HMAC key */
1746 if (ctx->keylen)
1747 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1748 (char *)&ctx->key, DMA_TO_DEVICE);
497f2e6b
LN
1749 else
1750 desc->ptr[2] = zero_entry;
1751
1752 /*
1753 * data in
1754 */
032d197e
LC
1755 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1756 DMA_TO_DEVICE, &desc->ptr[3]);
497f2e6b
LN
1757
1758 /* fifth DWORD empty */
1759 desc->ptr[4] = zero_entry;
1760
1761 /* hash/HMAC out -or- hash context out */
1762 if (req_ctx->last)
1763 map_single_talitos_ptr(dev, &desc->ptr[5],
1764 crypto_ahash_digestsize(tfm),
a2b35aa8 1765 areq->result, DMA_FROM_DEVICE);
497f2e6b
LN
1766 else
1767 map_single_talitos_ptr(dev, &desc->ptr[5],
1768 req_ctx->hw_context_size,
a2b35aa8 1769 req_ctx->hw_context, DMA_FROM_DEVICE);
497f2e6b
LN
1770
1771 /* last DWORD empty */
1772 desc->ptr[6] = zero_entry;
1773
2d02905e
LC
1774 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1775 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1776
5228f0f7 1777 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
497f2e6b
LN
1778 if (ret != -EINPROGRESS) {
1779 common_nonsnoop_hash_unmap(dev, edesc, areq);
1780 kfree(edesc);
1781 }
1782 return ret;
1783}
1784
1785static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1786 unsigned int nbytes)
1787{
1788 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1789 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1790 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1791
aeb4c132 1792 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
62293a37 1793 nbytes, 0, 0, 0, areq->base.flags, false);
497f2e6b
LN
1794}
1795
1796static int ahash_init(struct ahash_request *areq)
1797{
1798 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1799 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1800
1801 /* Initialize the context */
5e833bc4 1802 req_ctx->nbuf = 0;
60f208d7
KP
1803 req_ctx->first = 1; /* first indicates h/w must init its context */
1804 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1805 req_ctx->hw_context_size =
1806 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1807 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1808 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1809
1810 return 0;
1811}
1812
60f208d7
KP
1813/*
1814 * on h/w without explicit sha224 support, we initialize h/w context
1815 * manually with sha224 constants, and tell it to run sha256.
1816 */
1817static int ahash_init_sha224_swinit(struct ahash_request *areq)
1818{
1819 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1820
1821 ahash_init(areq);
1822 req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1823
a752447a
KP
1824 req_ctx->hw_context[0] = SHA224_H0;
1825 req_ctx->hw_context[1] = SHA224_H1;
1826 req_ctx->hw_context[2] = SHA224_H2;
1827 req_ctx->hw_context[3] = SHA224_H3;
1828 req_ctx->hw_context[4] = SHA224_H4;
1829 req_ctx->hw_context[5] = SHA224_H5;
1830 req_ctx->hw_context[6] = SHA224_H6;
1831 req_ctx->hw_context[7] = SHA224_H7;
60f208d7
KP
1832
1833 /* init 64-bit count */
1834 req_ctx->hw_context[8] = 0;
1835 req_ctx->hw_context[9] = 0;
1836
1837 return 0;
1838}
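/*
 * Note (informational): SHA224_H0..SHA224_H7 above are the standard
 * SHA-224 initial hash values from FIPS 180-4.  Running the SHA-256
 * compression function from this preloaded state and truncating the
 * result to 28 bytes is exactly SHA-224, which is why the descriptor
 * can still select the sha256 mode on such hardware.
 */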
1839
497f2e6b
LN
1840static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1841{
1842 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1843 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1844 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1845 struct talitos_edesc *edesc;
1846 unsigned int blocksize =
1847 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1848 unsigned int nbytes_to_hash;
1849 unsigned int to_hash_later;
5e833bc4 1850 unsigned int nsg;
8e409fe1 1851 int nents;
497f2e6b 1852
5e833bc4
LN
1853 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1854 /* Buffer up to one whole block */
8e409fe1
LC
1855 nents = sg_nents_for_len(areq->src, nbytes);
1856 if (nents < 0) {
1857 dev_err(ctx->dev, "Invalid number of src SG.\n");
1858 return nents;
1859 }
1860 sg_copy_to_buffer(areq->src, nents,
5e833bc4
LN
1861 req_ctx->buf + req_ctx->nbuf, nbytes);
1862 req_ctx->nbuf += nbytes;
497f2e6b
LN
1863 return 0;
1864 }
1865
5e833bc4
LN
1866 /* At least (blocksize + 1) bytes are available, or this is the final request */
1867 nbytes_to_hash = nbytes + req_ctx->nbuf;
1868 to_hash_later = nbytes_to_hash & (blocksize - 1);
1869
1870 if (req_ctx->last)
1871 to_hash_later = 0;
1872 else if (to_hash_later)
1873 /* There is a partial block. Hash the full block(s) now */
1874 nbytes_to_hash -= to_hash_later;
1875 else {
1876 /* Keep one block buffered */
1877 nbytes_to_hash -= blocksize;
1878 to_hash_later = blocksize;
1879 }
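/*
 * Worked example (illustrative): with a 64-byte block size, 10 bytes
 * already buffered and a 100-byte update, nbytes_to_hash is 110 and
 * to_hash_later is 110 & 63 = 46, so 64 bytes are hashed now and 46
 * are carried over.  If the total were an exact multiple of the block
 * size, the else branch above keeps one full block back so that a
 * later final()/finup() always has data left to pad and hash.
 */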
1880
1881 /* Chain in any previously buffered data */
1882 if (req_ctx->nbuf) {
1883 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1884 sg_init_table(req_ctx->bufsl, nsg);
1885 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1886 if (nsg > 1)
c56f6d12 1887 sg_chain(req_ctx->bufsl, 2, areq->src);
497f2e6b 1888 req_ctx->psrc = req_ctx->bufsl;
5e833bc4 1889 } else
497f2e6b 1890 req_ctx->psrc = areq->src;
5e833bc4
LN
1891
1892 if (to_hash_later) {
8e409fe1
LC
1893 nents = sg_nents_for_len(areq->src, nbytes);
1894 if (nents < 0) {
1895 dev_err(ctx->dev, "Invalid number of src SG.\n");
1896 return nents;
1897 }
d0525723 1898 sg_pcopy_to_buffer(areq->src, nents,
5e833bc4
LN
1899 req_ctx->bufnext,
1900 to_hash_later,
1901 nbytes - to_hash_later);
497f2e6b 1902 }
5e833bc4 1903 req_ctx->to_hash_later = to_hash_later;
497f2e6b 1904
5e833bc4 1905 /* Allocate extended descriptor */
497f2e6b
LN
1906 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1907 if (IS_ERR(edesc))
1908 return PTR_ERR(edesc);
1909
1910 edesc->desc.hdr = ctx->desc_hdr_template;
1911
1912 /* On last one, request SEC to pad; otherwise continue */
1913 if (req_ctx->last)
1914 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1915 else
1916 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1917
60f208d7
KP
1918 /* request SEC to INIT hash. */
1919 if (req_ctx->first && !req_ctx->swinit)
497f2e6b
LN
1920 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1921
1922 /* When the tfm context has a keylen, it's an HMAC.
1923 * A first or last (i.e. not middle) descriptor must request HMAC.
1924 */
1925 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1926 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1927
1928 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1929 ahash_done);
1930}
1931
1932static int ahash_update(struct ahash_request *areq)
1933{
1934 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1935
1936 req_ctx->last = 0;
1937
1938 return ahash_process_req(areq, areq->nbytes);
1939}
1940
1941static int ahash_final(struct ahash_request *areq)
1942{
1943 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1944
1945 req_ctx->last = 1;
1946
1947 return ahash_process_req(areq, 0);
1948}
1949
1950static int ahash_finup(struct ahash_request *areq)
1951{
1952 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1953
1954 req_ctx->last = 1;
1955
1956 return ahash_process_req(areq, areq->nbytes);
1957}
1958
1959static int ahash_digest(struct ahash_request *areq)
1960{
1961 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 1962 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 1963
60f208d7 1964 ahash->init(areq);
497f2e6b
LN
1965 req_ctx->last = 1;
1966
1967 return ahash_process_req(areq, areq->nbytes);
1968}
1969
79b3a418
LN
1970struct keyhash_result {
1971 struct completion completion;
1972 int err;
1973};
1974
1975static void keyhash_complete(struct crypto_async_request *req, int err)
1976{
1977 struct keyhash_result *res = req->data;
1978
1979 if (err == -EINPROGRESS)
1980 return;
1981
1982 res->err = err;
1983 complete(&res->completion);
1984}
1985
1986static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1987 u8 *hash)
1988{
1989 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1990
1991 struct scatterlist sg[1];
1992 struct ahash_request *req;
1993 struct keyhash_result hresult;
1994 int ret;
1995
1996 init_completion(&hresult.completion);
1997
1998 req = ahash_request_alloc(tfm, GFP_KERNEL);
1999 if (!req)
2000 return -ENOMEM;
2001
2002 /* Keep tfm keylen == 0 during hash of the long key */
2003 ctx->keylen = 0;
2004 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2005 keyhash_complete, &hresult);
2006
2007 sg_init_one(&sg[0], key, keylen);
2008
2009 ahash_request_set_crypt(req, sg, hash, keylen);
2010 ret = crypto_ahash_digest(req);
2011 switch (ret) {
2012 case 0:
2013 break;
2014 case -EINPROGRESS:
2015 case -EBUSY:
2016 ret = wait_for_completion_interruptible(
2017 &hresult.completion);
2018 if (!ret)
2019 ret = hresult.err;
2020 break;
2021 default:
2022 break;
2023 }
2024 ahash_request_free(req);
2025
2026 return ret;
2027}
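/*
 * Note (informational): keyhash() drives the asynchronous digest API
 * synchronously - when crypto_ahash_digest() returns -EINPROGRESS or
 * -EBUSY, the result is delivered to keyhash_complete() and the caller
 * sleeps (interruptibly) on the completion until it fires.
 */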
2028
2029static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2030 unsigned int keylen)
2031{
2032 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2033 unsigned int blocksize =
2034 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2035 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2036 unsigned int keysize = keylen;
2037 u8 hash[SHA512_DIGEST_SIZE];
2038 int ret;
2039
2040 if (keylen <= blocksize)
2041 memcpy(ctx->key, key, keysize);
2042 else {
2043 /* Must get the hash of the long key */
2044 ret = keyhash(tfm, key, keylen, hash);
2045
2046 if (ret) {
2047 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2048 return -EINVAL;
2049 }
2050
2051 keysize = digestsize;
2052 memcpy(ctx->key, hash, digestsize);
2053 }
2054
2055 ctx->keylen = keysize;
2056
2057 return 0;
2058}
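/*
 * Note (informational): this mirrors the standard HMAC keying rule from
 * RFC 2104 - a key longer than the underlying block size is first
 * replaced by its digest.  For example, a 100-byte key for hmac(sha256)
 * (64-byte blocks) is reduced to its 32-byte SHA-256 digest before
 * being stored in ctx->key.
 */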
2059
2060
9c4a7965 2061struct talitos_alg_template {
d5e4aaef
LN
2062 u32 type;
2063 union {
2064 struct crypto_alg crypto;
acbf7c62 2065 struct ahash_alg hash;
aeb4c132 2066 struct aead_alg aead;
d5e4aaef 2067 } alg;
9c4a7965
KP
2068 __be32 desc_hdr_template;
2069};
2070
2071static struct talitos_alg_template driver_algs[] = {
991155ba 2072 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
d5e4aaef 2073 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2074 .alg.aead = {
2075 .base = {
2076 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2077 .cra_driver_name = "authenc-hmac-sha1-"
2078 "cbc-aes-talitos",
2079 .cra_blocksize = AES_BLOCK_SIZE,
2080 .cra_flags = CRYPTO_ALG_ASYNC,
2081 },
2082 .ivsize = AES_BLOCK_SIZE,
2083 .maxauthsize = SHA1_DIGEST_SIZE,
56af8cd4 2084 },
9c4a7965
KP
2085 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2086 DESC_HDR_SEL0_AESU |
2087 DESC_HDR_MODE0_AESU_CBC |
2088 DESC_HDR_SEL1_MDEUA |
2089 DESC_HDR_MODE1_MDEU_INIT |
2090 DESC_HDR_MODE1_MDEU_PAD |
2091 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
70bcaca7 2092 },
d5e4aaef 2093 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2094 .alg.aead = {
2095 .base = {
2096 .cra_name = "authenc(hmac(sha1),"
2097 "cbc(des3_ede))",
2098 .cra_driver_name = "authenc-hmac-sha1-"
2099 "cbc-3des-talitos",
2100 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2101 .cra_flags = CRYPTO_ALG_ASYNC,
2102 },
2103 .ivsize = DES3_EDE_BLOCK_SIZE,
2104 .maxauthsize = SHA1_DIGEST_SIZE,
56af8cd4 2105 },
70bcaca7
LN
2106 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2107 DESC_HDR_SEL0_DEU |
2108 DESC_HDR_MODE0_DEU_CBC |
2109 DESC_HDR_MODE0_DEU_3DES |
2110 DESC_HDR_SEL1_MDEUA |
2111 DESC_HDR_MODE1_MDEU_INIT |
2112 DESC_HDR_MODE1_MDEU_PAD |
2113 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
3952f17e 2114 },
357fb605 2115 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2116 .alg.aead = {
2117 .base = {
2118 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2119 .cra_driver_name = "authenc-hmac-sha224-"
2120 "cbc-aes-talitos",
2121 .cra_blocksize = AES_BLOCK_SIZE,
2122 .cra_flags = CRYPTO_ALG_ASYNC,
2123 },
2124 .ivsize = AES_BLOCK_SIZE,
2125 .maxauthsize = SHA224_DIGEST_SIZE,
357fb605
HG
2126 },
2127 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2128 DESC_HDR_SEL0_AESU |
2129 DESC_HDR_MODE0_AESU_CBC |
2130 DESC_HDR_SEL1_MDEUA |
2131 DESC_HDR_MODE1_MDEU_INIT |
2132 DESC_HDR_MODE1_MDEU_PAD |
2133 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2134 },
2135 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2136 .alg.aead = {
2137 .base = {
2138 .cra_name = "authenc(hmac(sha224),"
2139 "cbc(des3_ede))",
2140 .cra_driver_name = "authenc-hmac-sha224-"
2141 "cbc-3des-talitos",
2142 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2143 .cra_flags = CRYPTO_ALG_ASYNC,
2144 },
2145 .ivsize = DES3_EDE_BLOCK_SIZE,
2146 .maxauthsize = SHA224_DIGEST_SIZE,
357fb605
HG
2147 },
2148 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2149 DESC_HDR_SEL0_DEU |
2150 DESC_HDR_MODE0_DEU_CBC |
2151 DESC_HDR_MODE0_DEU_3DES |
2152 DESC_HDR_SEL1_MDEUA |
2153 DESC_HDR_MODE1_MDEU_INIT |
2154 DESC_HDR_MODE1_MDEU_PAD |
2155 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2156 },
d5e4aaef 2157 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2158 .alg.aead = {
2159 .base = {
2160 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2161 .cra_driver_name = "authenc-hmac-sha256-"
2162 "cbc-aes-talitos",
2163 .cra_blocksize = AES_BLOCK_SIZE,
2164 .cra_flags = CRYPTO_ALG_ASYNC,
2165 },
2166 .ivsize = AES_BLOCK_SIZE,
2167 .maxauthsize = SHA256_DIGEST_SIZE,
56af8cd4 2168 },
3952f17e
LN
2169 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2170 DESC_HDR_SEL0_AESU |
2171 DESC_HDR_MODE0_AESU_CBC |
2172 DESC_HDR_SEL1_MDEUA |
2173 DESC_HDR_MODE1_MDEU_INIT |
2174 DESC_HDR_MODE1_MDEU_PAD |
2175 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2176 },
d5e4aaef 2177 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2178 .alg.aead = {
2179 .base = {
2180 .cra_name = "authenc(hmac(sha256),"
2181 "cbc(des3_ede))",
2182 .cra_driver_name = "authenc-hmac-sha256-"
2183 "cbc-3des-talitos",
2184 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2185 .cra_flags = CRYPTO_ALG_ASYNC,
2186 },
2187 .ivsize = DES3_EDE_BLOCK_SIZE,
2188 .maxauthsize = SHA256_DIGEST_SIZE,
56af8cd4 2189 },
3952f17e
LN
2190 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2191 DESC_HDR_SEL0_DEU |
2192 DESC_HDR_MODE0_DEU_CBC |
2193 DESC_HDR_MODE0_DEU_3DES |
2194 DESC_HDR_SEL1_MDEUA |
2195 DESC_HDR_MODE1_MDEU_INIT |
2196 DESC_HDR_MODE1_MDEU_PAD |
2197 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2198 },
d5e4aaef 2199 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2200 .alg.aead = {
2201 .base = {
2202 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2203 .cra_driver_name = "authenc-hmac-sha384-"
2204 "cbc-aes-talitos",
2205 .cra_blocksize = AES_BLOCK_SIZE,
2206 .cra_flags = CRYPTO_ALG_ASYNC,
2207 },
2208 .ivsize = AES_BLOCK_SIZE,
2209 .maxauthsize = SHA384_DIGEST_SIZE,
357fb605
HG
2210 },
2211 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2212 DESC_HDR_SEL0_AESU |
2213 DESC_HDR_MODE0_AESU_CBC |
2214 DESC_HDR_SEL1_MDEUB |
2215 DESC_HDR_MODE1_MDEU_INIT |
2216 DESC_HDR_MODE1_MDEU_PAD |
2217 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2218 },
2219 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2220 .alg.aead = {
2221 .base = {
2222 .cra_name = "authenc(hmac(sha384),"
2223 "cbc(des3_ede))",
2224 .cra_driver_name = "authenc-hmac-sha384-"
2225 "cbc-3des-talitos",
2226 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2227 .cra_flags = CRYPTO_ALG_ASYNC,
2228 },
2229 .ivsize = DES3_EDE_BLOCK_SIZE,
2230 .maxauthsize = SHA384_DIGEST_SIZE,
357fb605
HG
2231 },
2232 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2233 DESC_HDR_SEL0_DEU |
2234 DESC_HDR_MODE0_DEU_CBC |
2235 DESC_HDR_MODE0_DEU_3DES |
2236 DESC_HDR_SEL1_MDEUB |
2237 DESC_HDR_MODE1_MDEU_INIT |
2238 DESC_HDR_MODE1_MDEU_PAD |
2239 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2240 },
2241 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2242 .alg.aead = {
2243 .base = {
2244 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2245 .cra_driver_name = "authenc-hmac-sha512-"
2246 "cbc-aes-talitos",
2247 .cra_blocksize = AES_BLOCK_SIZE,
2248 .cra_flags = CRYPTO_ALG_ASYNC,
2249 },
2250 .ivsize = AES_BLOCK_SIZE,
2251 .maxauthsize = SHA512_DIGEST_SIZE,
357fb605
HG
2252 },
2253 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2254 DESC_HDR_SEL0_AESU |
2255 DESC_HDR_MODE0_AESU_CBC |
2256 DESC_HDR_SEL1_MDEUB |
2257 DESC_HDR_MODE1_MDEU_INIT |
2258 DESC_HDR_MODE1_MDEU_PAD |
2259 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2260 },
2261 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2262 .alg.aead = {
2263 .base = {
2264 .cra_name = "authenc(hmac(sha512),"
2265 "cbc(des3_ede))",
2266 .cra_driver_name = "authenc-hmac-sha512-"
2267 "cbc-3des-talitos",
2268 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2269 .cra_flags = CRYPTO_ALG_ASYNC,
2270 },
2271 .ivsize = DES3_EDE_BLOCK_SIZE,
2272 .maxauthsize = SHA512_DIGEST_SIZE,
357fb605
HG
2273 },
2274 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2275 DESC_HDR_SEL0_DEU |
2276 DESC_HDR_MODE0_DEU_CBC |
2277 DESC_HDR_MODE0_DEU_3DES |
2278 DESC_HDR_SEL1_MDEUB |
2279 DESC_HDR_MODE1_MDEU_INIT |
2280 DESC_HDR_MODE1_MDEU_PAD |
2281 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2282 },
2283 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2284 .alg.aead = {
2285 .base = {
2286 .cra_name = "authenc(hmac(md5),cbc(aes))",
2287 .cra_driver_name = "authenc-hmac-md5-"
2288 "cbc-aes-talitos",
2289 .cra_blocksize = AES_BLOCK_SIZE,
2290 .cra_flags = CRYPTO_ALG_ASYNC,
2291 },
2292 .ivsize = AES_BLOCK_SIZE,
2293 .maxauthsize = MD5_DIGEST_SIZE,
56af8cd4 2294 },
3952f17e
LN
2295 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2296 DESC_HDR_SEL0_AESU |
2297 DESC_HDR_MODE0_AESU_CBC |
2298 DESC_HDR_SEL1_MDEUA |
2299 DESC_HDR_MODE1_MDEU_INIT |
2300 DESC_HDR_MODE1_MDEU_PAD |
2301 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2302 },
d5e4aaef 2303 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2304 .alg.aead = {
2305 .base = {
2306 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2307 .cra_driver_name = "authenc-hmac-md5-"
2308 "cbc-3des-talitos",
2309 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2310 .cra_flags = CRYPTO_ALG_ASYNC,
2311 },
2312 .ivsize = DES3_EDE_BLOCK_SIZE,
2313 .maxauthsize = MD5_DIGEST_SIZE,
56af8cd4 2314 },
3952f17e
LN
2315 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2316 DESC_HDR_SEL0_DEU |
2317 DESC_HDR_MODE0_DEU_CBC |
2318 DESC_HDR_MODE0_DEU_3DES |
2319 DESC_HDR_SEL1_MDEUA |
2320 DESC_HDR_MODE1_MDEU_INIT |
2321 DESC_HDR_MODE1_MDEU_PAD |
2322 DESC_HDR_MODE1_MDEU_MD5_HMAC,
4de9d0b5
LN
2323 },
2324 /* ABLKCIPHER algorithms. */
5e75ae1b
LC
2325 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2326 .alg.crypto = {
2327 .cra_name = "ecb(aes)",
2328 .cra_driver_name = "ecb-aes-talitos",
2329 .cra_blocksize = AES_BLOCK_SIZE,
2330 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2331 CRYPTO_ALG_ASYNC,
2332 .cra_ablkcipher = {
2333 .min_keysize = AES_MIN_KEY_SIZE,
2334 .max_keysize = AES_MAX_KEY_SIZE,
2335 .ivsize = AES_BLOCK_SIZE,
2336 }
2337 },
2338 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2339 DESC_HDR_SEL0_AESU,
2340 },
d5e4aaef
LN
2341 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2342 .alg.crypto = {
4de9d0b5
LN
2343 .cra_name = "cbc(aes)",
2344 .cra_driver_name = "cbc-aes-talitos",
2345 .cra_blocksize = AES_BLOCK_SIZE,
2346 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2347 CRYPTO_ALG_ASYNC,
4de9d0b5 2348 .cra_ablkcipher = {
4de9d0b5
LN
2349 .min_keysize = AES_MIN_KEY_SIZE,
2350 .max_keysize = AES_MAX_KEY_SIZE,
2351 .ivsize = AES_BLOCK_SIZE,
2352 }
2353 },
2354 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2355 DESC_HDR_SEL0_AESU |
2356 DESC_HDR_MODE0_AESU_CBC,
2357 },
5e75ae1b
LC
2358 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2359 .alg.crypto = {
2360 .cra_name = "ctr(aes)",
2361 .cra_driver_name = "ctr-aes-talitos",
2362 .cra_blocksize = AES_BLOCK_SIZE,
2363 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2364 CRYPTO_ALG_ASYNC,
2365 .cra_ablkcipher = {
2366 .min_keysize = AES_MIN_KEY_SIZE,
2367 .max_keysize = AES_MAX_KEY_SIZE,
2368 .ivsize = AES_BLOCK_SIZE,
2369 }
2370 },
2371 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2372 DESC_HDR_SEL0_AESU |
2373 DESC_HDR_MODE0_AESU_CTR,
2374 },
2375 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2376 .alg.crypto = {
2377 .cra_name = "ecb(des)",
2378 .cra_driver_name = "ecb-des-talitos",
2379 .cra_blocksize = DES_BLOCK_SIZE,
2380 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2381 CRYPTO_ALG_ASYNC,
2382 .cra_ablkcipher = {
2383 .min_keysize = DES_KEY_SIZE,
2384 .max_keysize = DES_KEY_SIZE,
2385 .ivsize = DES_BLOCK_SIZE,
2386 }
2387 },
2388 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2389 DESC_HDR_SEL0_DEU,
2390 },
2391 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2392 .alg.crypto = {
2393 .cra_name = "cbc(des)",
2394 .cra_driver_name = "cbc-des-talitos",
2395 .cra_blocksize = DES_BLOCK_SIZE,
2396 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2397 CRYPTO_ALG_ASYNC,
2398 .cra_ablkcipher = {
2399 .min_keysize = DES_KEY_SIZE,
2400 .max_keysize = DES_KEY_SIZE,
2401 .ivsize = DES_BLOCK_SIZE,
2402 }
2403 },
2404 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2405 DESC_HDR_SEL0_DEU |
2406 DESC_HDR_MODE0_DEU_CBC,
2407 },
2408 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2409 .alg.crypto = {
2410 .cra_name = "ecb(des3_ede)",
2411 .cra_driver_name = "ecb-3des-talitos",
2412 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2413 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2414 CRYPTO_ALG_ASYNC,
2415 .cra_ablkcipher = {
2416 .min_keysize = DES3_EDE_KEY_SIZE,
2417 .max_keysize = DES3_EDE_KEY_SIZE,
2418 .ivsize = DES3_EDE_BLOCK_SIZE,
2419 }
2420 },
2421 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2422 DESC_HDR_SEL0_DEU |
2423 DESC_HDR_MODE0_DEU_3DES,
2424 },
d5e4aaef
LN
2425 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2426 .alg.crypto = {
4de9d0b5
LN
2427 .cra_name = "cbc(des3_ede)",
2428 .cra_driver_name = "cbc-3des-talitos",
2429 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2430 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2431 CRYPTO_ALG_ASYNC,
4de9d0b5 2432 .cra_ablkcipher = {
4de9d0b5
LN
2433 .min_keysize = DES3_EDE_KEY_SIZE,
2434 .max_keysize = DES3_EDE_KEY_SIZE,
2435 .ivsize = DES3_EDE_BLOCK_SIZE,
2436 }
2437 },
2438 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2439 DESC_HDR_SEL0_DEU |
2440 DESC_HDR_MODE0_DEU_CBC |
2441 DESC_HDR_MODE0_DEU_3DES,
497f2e6b
LN
2442 },
2443 /* AHASH algorithms. */
2444 { .type = CRYPTO_ALG_TYPE_AHASH,
2445 .alg.hash = {
497f2e6b
LN
2446 .halg.digestsize = MD5_DIGEST_SIZE,
2447 .halg.base = {
2448 .cra_name = "md5",
2449 .cra_driver_name = "md5-talitos",
b3988618 2450 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
497f2e6b
LN
2451 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2452 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2453 }
2454 },
2455 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2456 DESC_HDR_SEL0_MDEUA |
2457 DESC_HDR_MODE0_MDEU_MD5,
2458 },
2459 { .type = CRYPTO_ALG_TYPE_AHASH,
2460 .alg.hash = {
497f2e6b
LN
2461 .halg.digestsize = SHA1_DIGEST_SIZE,
2462 .halg.base = {
2463 .cra_name = "sha1",
2464 .cra_driver_name = "sha1-talitos",
2465 .cra_blocksize = SHA1_BLOCK_SIZE,
2466 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2467 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2468 }
2469 },
2470 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2471 DESC_HDR_SEL0_MDEUA |
2472 DESC_HDR_MODE0_MDEU_SHA1,
2473 },
60f208d7
KP
2474 { .type = CRYPTO_ALG_TYPE_AHASH,
2475 .alg.hash = {
60f208d7
KP
2476 .halg.digestsize = SHA224_DIGEST_SIZE,
2477 .halg.base = {
2478 .cra_name = "sha224",
2479 .cra_driver_name = "sha224-talitos",
2480 .cra_blocksize = SHA224_BLOCK_SIZE,
2481 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2482 CRYPTO_ALG_ASYNC,
60f208d7
KP
2483 }
2484 },
2485 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2486 DESC_HDR_SEL0_MDEUA |
2487 DESC_HDR_MODE0_MDEU_SHA224,
2488 },
497f2e6b
LN
2489 { .type = CRYPTO_ALG_TYPE_AHASH,
2490 .alg.hash = {
497f2e6b
LN
2491 .halg.digestsize = SHA256_DIGEST_SIZE,
2492 .halg.base = {
2493 .cra_name = "sha256",
2494 .cra_driver_name = "sha256-talitos",
2495 .cra_blocksize = SHA256_BLOCK_SIZE,
2496 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2497 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2498 }
2499 },
2500 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2501 DESC_HDR_SEL0_MDEUA |
2502 DESC_HDR_MODE0_MDEU_SHA256,
2503 },
2504 { .type = CRYPTO_ALG_TYPE_AHASH,
2505 .alg.hash = {
497f2e6b
LN
2506 .halg.digestsize = SHA384_DIGEST_SIZE,
2507 .halg.base = {
2508 .cra_name = "sha384",
2509 .cra_driver_name = "sha384-talitos",
2510 .cra_blocksize = SHA384_BLOCK_SIZE,
2511 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2512 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2513 }
2514 },
2515 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2516 DESC_HDR_SEL0_MDEUB |
2517 DESC_HDR_MODE0_MDEUB_SHA384,
2518 },
2519 { .type = CRYPTO_ALG_TYPE_AHASH,
2520 .alg.hash = {
497f2e6b
LN
2521 .halg.digestsize = SHA512_DIGEST_SIZE,
2522 .halg.base = {
2523 .cra_name = "sha512",
2524 .cra_driver_name = "sha512-talitos",
2525 .cra_blocksize = SHA512_BLOCK_SIZE,
2526 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2527 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2528 }
2529 },
2530 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2531 DESC_HDR_SEL0_MDEUB |
2532 DESC_HDR_MODE0_MDEUB_SHA512,
2533 },
79b3a418
LN
2534 { .type = CRYPTO_ALG_TYPE_AHASH,
2535 .alg.hash = {
79b3a418
LN
2536 .halg.digestsize = MD5_DIGEST_SIZE,
2537 .halg.base = {
2538 .cra_name = "hmac(md5)",
2539 .cra_driver_name = "hmac-md5-talitos",
b3988618 2540 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
79b3a418
LN
2541 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2542 CRYPTO_ALG_ASYNC,
79b3a418
LN
2543 }
2544 },
2545 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2546 DESC_HDR_SEL0_MDEUA |
2547 DESC_HDR_MODE0_MDEU_MD5,
2548 },
2549 { .type = CRYPTO_ALG_TYPE_AHASH,
2550 .alg.hash = {
79b3a418
LN
2551 .halg.digestsize = SHA1_DIGEST_SIZE,
2552 .halg.base = {
2553 .cra_name = "hmac(sha1)",
2554 .cra_driver_name = "hmac-sha1-talitos",
2555 .cra_blocksize = SHA1_BLOCK_SIZE,
2556 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2557 CRYPTO_ALG_ASYNC,
79b3a418
LN
2558 }
2559 },
2560 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2561 DESC_HDR_SEL0_MDEUA |
2562 DESC_HDR_MODE0_MDEU_SHA1,
2563 },
2564 { .type = CRYPTO_ALG_TYPE_AHASH,
2565 .alg.hash = {
79b3a418
LN
2566 .halg.digestsize = SHA224_DIGEST_SIZE,
2567 .halg.base = {
2568 .cra_name = "hmac(sha224)",
2569 .cra_driver_name = "hmac-sha224-talitos",
2570 .cra_blocksize = SHA224_BLOCK_SIZE,
2571 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2572 CRYPTO_ALG_ASYNC,
79b3a418
LN
2573 }
2574 },
2575 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2576 DESC_HDR_SEL0_MDEUA |
2577 DESC_HDR_MODE0_MDEU_SHA224,
2578 },
2579 { .type = CRYPTO_ALG_TYPE_AHASH,
2580 .alg.hash = {
79b3a418
LN
2581 .halg.digestsize = SHA256_DIGEST_SIZE,
2582 .halg.base = {
2583 .cra_name = "hmac(sha256)",
2584 .cra_driver_name = "hmac-sha256-talitos",
2585 .cra_blocksize = SHA256_BLOCK_SIZE,
2586 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2587 CRYPTO_ALG_ASYNC,
79b3a418
LN
2588 }
2589 },
2590 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2591 DESC_HDR_SEL0_MDEUA |
2592 DESC_HDR_MODE0_MDEU_SHA256,
2593 },
2594 { .type = CRYPTO_ALG_TYPE_AHASH,
2595 .alg.hash = {
79b3a418
LN
2596 .halg.digestsize = SHA384_DIGEST_SIZE,
2597 .halg.base = {
2598 .cra_name = "hmac(sha384)",
2599 .cra_driver_name = "hmac-sha384-talitos",
2600 .cra_blocksize = SHA384_BLOCK_SIZE,
2601 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2602 CRYPTO_ALG_ASYNC,
79b3a418
LN
2603 }
2604 },
2605 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2606 DESC_HDR_SEL0_MDEUB |
2607 DESC_HDR_MODE0_MDEUB_SHA384,
2608 },
2609 { .type = CRYPTO_ALG_TYPE_AHASH,
2610 .alg.hash = {
79b3a418
LN
2611 .halg.digestsize = SHA512_DIGEST_SIZE,
2612 .halg.base = {
2613 .cra_name = "hmac(sha512)",
2614 .cra_driver_name = "hmac-sha512-talitos",
2615 .cra_blocksize = SHA512_BLOCK_SIZE,
2616 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2617 CRYPTO_ALG_ASYNC,
79b3a418
LN
2618 }
2619 },
2620 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2621 DESC_HDR_SEL0_MDEUB |
2622 DESC_HDR_MODE0_MDEUB_SHA512,
2623 }
9c4a7965
KP
2624};
2625
2626struct talitos_crypto_alg {
2627 struct list_head entry;
2628 struct device *dev;
acbf7c62 2629 struct talitos_alg_template algt;
9c4a7965
KP
2630};
2631
2632static int talitos_cra_init(struct crypto_tfm *tfm)
2633{
2634 struct crypto_alg *alg = tfm->__crt_alg;
19bbbc63 2635 struct talitos_crypto_alg *talitos_alg;
9c4a7965 2636 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
5228f0f7 2637 struct talitos_private *priv;
9c4a7965 2638
497f2e6b
LN
2639 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2640 talitos_alg = container_of(__crypto_ahash_alg(alg),
2641 struct talitos_crypto_alg,
2642 algt.alg.hash);
2643 else
2644 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2645 algt.alg.crypto);
19bbbc63 2646
9c4a7965
KP
2647 /* update context with ptr to dev */
2648 ctx->dev = talitos_alg->dev;
19bbbc63 2649
5228f0f7
KP
2650 /* assign SEC channel to tfm in round-robin fashion */
2651 priv = dev_get_drvdata(ctx->dev);
2652 ctx->ch = atomic_inc_return(&priv->last_chan) &
2653 (priv->num_channels - 1);
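/*
 * Worked example (illustrative): the "& (priv->num_channels - 1)" above
 * is a cheap modulo that relies on talitos_probe() rejecting any
 * fsl,num-channels value that is not a power of two.  With four
 * channels, successive tfms get 1 & 3 = 1, 2 & 3 = 2, 3 & 3 = 3,
 * 4 & 3 = 0, and so on round-robin.
 */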
2654
9c4a7965 2655 /* copy descriptor header template value */
acbf7c62 2656 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
9c4a7965 2657
602dba5a
KP
2658 /* select done notification */
2659 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2660
497f2e6b
LN
2661 return 0;
2662}
2663
aeb4c132 2664static int talitos_cra_init_aead(struct crypto_aead *tfm)
497f2e6b 2665{
aeb4c132 2666 talitos_cra_init(crypto_aead_tfm(tfm));
9c4a7965
KP
2667 return 0;
2668}
2669
497f2e6b
LN
2670static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2671{
2672 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2673
2674 talitos_cra_init(tfm);
2675
2676 ctx->keylen = 0;
2677 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2678 sizeof(struct talitos_ahash_req_ctx));
2679
2680 return 0;
2681}
2682
9c4a7965
KP
2683/*
2684 * given the alg's descriptor header template, determine whether the
2685 * descriptor type and the required primary/secondary execution units
2686 * match the hw capabilities described in the device tree node.
2687 */
2688static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2689{
2690 struct talitos_private *priv = dev_get_drvdata(dev);
2691 int ret;
2692
2693 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2694 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2695
2696 if (SECONDARY_EU(desc_hdr_template))
2697 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2698 & priv->exec_units);
2699
2700 return ret;
2701}
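/*
 * Worked example (illustrative): for the "cbc(aes)" template the header
 * is DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | DESC_HDR_SEL0_AESU |
 * DESC_HDR_MODE0_AESU_CBC, so hw_supports() checks the matching
 * descriptor-type bit in priv->desc_types and the AESU bit in
 * priv->exec_units, both taken from the fsl,descriptor-types-mask and
 * fsl,exec-units-mask device tree properties.  Plain ciphers have no
 * secondary EU, so the SECONDARY_EU() clause is skipped; the AEAD
 * templates additionally require the MDEU bit.
 */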
2702
2dc11581 2703static int talitos_remove(struct platform_device *ofdev)
9c4a7965
KP
2704{
2705 struct device *dev = &ofdev->dev;
2706 struct talitos_private *priv = dev_get_drvdata(dev);
2707 struct talitos_crypto_alg *t_alg, *n;
2708 int i;
2709
2710 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
acbf7c62
LN
2711 switch (t_alg->algt.type) {
2712 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62 2713 break;
aeb4c132
HX
2714 case CRYPTO_ALG_TYPE_AEAD:
2715 crypto_unregister_aead(&t_alg->algt.alg.aead);
acbf7c62
LN
2716 case CRYPTO_ALG_TYPE_AHASH:
2717 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2718 break;
2719 }
9c4a7965
KP
2720 list_del(&t_alg->entry);
2721 kfree(t_alg);
2722 }
2723
2724 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2725 talitos_unregister_rng(dev);
2726
35a3bb3d 2727 for (i = 0; priv->chan && i < priv->num_channels; i++)
0b798247 2728 kfree(priv->chan[i].fifo);
9c4a7965 2729
4b992628 2730 kfree(priv->chan);
9c4a7965 2731
c3e337f8 2732 for (i = 0; i < 2; i++)
2cdba3cf 2733 if (priv->irq[i]) {
c3e337f8
KP
2734 free_irq(priv->irq[i], dev);
2735 irq_dispose_mapping(priv->irq[i]);
2736 }
9c4a7965 2737
c3e337f8 2738 tasklet_kill(&priv->done_task[0]);
2cdba3cf 2739 if (priv->irq[1])
c3e337f8 2740 tasklet_kill(&priv->done_task[1]);
9c4a7965
KP
2741
2742 iounmap(priv->reg);
2743
9c4a7965
KP
2744 kfree(priv);
2745
2746 return 0;
2747}
2748
2749static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2750 struct talitos_alg_template
2751 *template)
2752{
60f208d7 2753 struct talitos_private *priv = dev_get_drvdata(dev);
9c4a7965
KP
2754 struct talitos_crypto_alg *t_alg;
2755 struct crypto_alg *alg;
2756
2757 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2758 if (!t_alg)
2759 return ERR_PTR(-ENOMEM);
2760
acbf7c62
LN
2761 t_alg->algt = *template;
2762
2763 switch (t_alg->algt.type) {
2764 case CRYPTO_ALG_TYPE_ABLKCIPHER:
497f2e6b
LN
2765 alg = &t_alg->algt.alg.crypto;
2766 alg->cra_init = talitos_cra_init;
d4cd3283 2767 alg->cra_type = &crypto_ablkcipher_type;
b286e003
KP
2768 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2769 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2770 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2771 alg->cra_ablkcipher.geniv = "eseqiv";
497f2e6b 2772 break;
acbf7c62 2773 case CRYPTO_ALG_TYPE_AEAD:
aeb4c132 2774 alg = &t_alg->algt.alg.aead.base;
aeb4c132
HX
2775 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2776 t_alg->algt.alg.aead.setkey = aead_setkey;
2777 t_alg->algt.alg.aead.encrypt = aead_encrypt;
2778 t_alg->algt.alg.aead.decrypt = aead_decrypt;
acbf7c62
LN
2779 break;
2780 case CRYPTO_ALG_TYPE_AHASH:
2781 alg = &t_alg->algt.alg.hash.halg.base;
497f2e6b 2782 alg->cra_init = talitos_cra_init_ahash;
d4cd3283 2783 alg->cra_type = &crypto_ahash_type;
b286e003
KP
2784 t_alg->algt.alg.hash.init = ahash_init;
2785 t_alg->algt.alg.hash.update = ahash_update;
2786 t_alg->algt.alg.hash.final = ahash_final;
2787 t_alg->algt.alg.hash.finup = ahash_finup;
2788 t_alg->algt.alg.hash.digest = ahash_digest;
2789 t_alg->algt.alg.hash.setkey = ahash_setkey;
2790
79b3a418 2791 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
0b2730d8
KP
2792 !strncmp(alg->cra_name, "hmac", 4)) {
2793 kfree(t_alg);
79b3a418 2794 return ERR_PTR(-ENOTSUPP);
0b2730d8 2795 }
60f208d7 2796 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
79b3a418
LN
2797 (!strcmp(alg->cra_name, "sha224") ||
2798 !strcmp(alg->cra_name, "hmac(sha224)"))) {
60f208d7
KP
2799 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2800 t_alg->algt.desc_hdr_template =
2801 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2802 DESC_HDR_SEL0_MDEUA |
2803 DESC_HDR_MODE0_MDEU_SHA256;
2804 }
497f2e6b 2805 break;
1d11911a
KP
2806 default:
2807 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
5fa7dadc 2808 kfree(t_alg);
1d11911a 2809 return ERR_PTR(-EINVAL);
acbf7c62 2810 }
9c4a7965 2811
9c4a7965 2812 alg->cra_module = THIS_MODULE;
9c4a7965 2813 alg->cra_priority = TALITOS_CRA_PRIORITY;
9c4a7965 2814 alg->cra_alignmask = 0;
9c4a7965 2815 alg->cra_ctxsize = sizeof(struct talitos_ctx);
d912bb76 2816 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
9c4a7965 2817
9c4a7965
KP
2818 t_alg->dev = dev;
2819
2820 return t_alg;
2821}
2822
c3e337f8
KP
2823static int talitos_probe_irq(struct platform_device *ofdev)
2824{
2825 struct device *dev = &ofdev->dev;
2826 struct device_node *np = ofdev->dev.of_node;
2827 struct talitos_private *priv = dev_get_drvdata(dev);
2828 int err;
dd3c0987 2829 bool is_sec1 = has_ftr_sec1(priv);
c3e337f8
KP
2830
2831 priv->irq[0] = irq_of_parse_and_map(np, 0);
2cdba3cf 2832 if (!priv->irq[0]) {
c3e337f8
KP
2833 dev_err(dev, "failed to map irq\n");
2834 return -EINVAL;
2835 }
dd3c0987
LC
2836 if (is_sec1) {
2837 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2838 dev_driver_string(dev), dev);
2839 goto primary_out;
2840 }
c3e337f8
KP
2841
2842 priv->irq[1] = irq_of_parse_and_map(np, 1);
2843
2844 /* get the primary irq line */
2cdba3cf 2845 if (!priv->irq[1]) {
dd3c0987 2846 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
c3e337f8
KP
2847 dev_driver_string(dev), dev);
2848 goto primary_out;
2849 }
2850
dd3c0987 2851 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
c3e337f8
KP
2852 dev_driver_string(dev), dev);
2853 if (err)
2854 goto primary_out;
2855
2856 /* get the secondary irq line */
dd3c0987 2857 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
c3e337f8
KP
2858 dev_driver_string(dev), dev);
2859 if (err) {
2860 dev_err(dev, "failed to request secondary irq\n");
2861 irq_dispose_mapping(priv->irq[1]);
2cdba3cf 2862 priv->irq[1] = 0;
c3e337f8
KP
2863 }
2864
2865 return err;
2866
2867primary_out:
2868 if (err) {
2869 dev_err(dev, "failed to request primary irq\n");
2870 irq_dispose_mapping(priv->irq[0]);
2cdba3cf 2871 priv->irq[0] = 0;
c3e337f8
KP
2872 }
2873
2874 return err;
2875}
2876
1c48a5c9 2877static int talitos_probe(struct platform_device *ofdev)
9c4a7965
KP
2878{
2879 struct device *dev = &ofdev->dev;
61c7a080 2880 struct device_node *np = ofdev->dev.of_node;
9c4a7965
KP
2881 struct talitos_private *priv;
2882 const unsigned int *prop;
2883 int i, err;
5fa7fa14 2884 int stride;
9c4a7965
KP
2885
2886 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2887 if (!priv)
2888 return -ENOMEM;
2889
f3de9cb1
KH
2890 INIT_LIST_HEAD(&priv->alg_list);
2891
9c4a7965
KP
2892 dev_set_drvdata(dev, priv);
2893
2894 priv->ofdev = ofdev;
2895
511d63cb
HG
2896 spin_lock_init(&priv->reg_lock);
2897
9c4a7965
KP
2898 priv->reg = of_iomap(np, 0);
2899 if (!priv->reg) {
2900 dev_err(dev, "failed to of_iomap\n");
2901 err = -ENOMEM;
2902 goto err_out;
2903 }
2904
2905 /* get SEC version capabilities from device tree */
2906 prop = of_get_property(np, "fsl,num-channels", NULL);
2907 if (prop)
2908 priv->num_channels = *prop;
2909
2910 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2911 if (prop)
2912 priv->chfifo_len = *prop;
2913
2914 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2915 if (prop)
2916 priv->exec_units = *prop;
2917
2918 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2919 if (prop)
2920 priv->desc_types = *prop;
2921
2922 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2923 !priv->exec_units || !priv->desc_types) {
2924 dev_err(dev, "invalid property data in device tree node\n");
2925 err = -EINVAL;
2926 goto err_out;
2927 }
2928
f3c85bc1
LN
2929 if (of_device_is_compatible(np, "fsl,sec3.0"))
2930 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2931
fe5720e2 2932 if (of_device_is_compatible(np, "fsl,sec2.1"))
60f208d7 2933 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
79b3a418
LN
2934 TALITOS_FTR_SHA224_HWINIT |
2935 TALITOS_FTR_HMAC_OK;
fe5720e2 2936
21590888
LC
2937 if (of_device_is_compatible(np, "fsl,sec1.0"))
2938 priv->features |= TALITOS_FTR_SEC1;
2939
5fa7fa14
LC
2940 if (of_device_is_compatible(np, "fsl,sec1.2")) {
2941 priv->reg_deu = priv->reg + TALITOS12_DEU;
2942 priv->reg_aesu = priv->reg + TALITOS12_AESU;
2943 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2944 stride = TALITOS1_CH_STRIDE;
2945 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2946 priv->reg_deu = priv->reg + TALITOS10_DEU;
2947 priv->reg_aesu = priv->reg + TALITOS10_AESU;
2948 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2949 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2950 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2951 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2952 stride = TALITOS1_CH_STRIDE;
2953 } else {
2954 priv->reg_deu = priv->reg + TALITOS2_DEU;
2955 priv->reg_aesu = priv->reg + TALITOS2_AESU;
2956 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2957 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2958 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2959 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2960 priv->reg_keu = priv->reg + TALITOS2_KEU;
2961 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2962 stride = TALITOS2_CH_STRIDE;
2963 }
2964
dd3c0987
LC
2965 err = talitos_probe_irq(ofdev);
2966 if (err)
2967 goto err_out;
2968
2969 if (of_device_is_compatible(np, "fsl,sec1.0")) {
2970 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2971 (unsigned long)dev);
2972 } else {
2973 if (!priv->irq[1]) {
2974 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
2975 (unsigned long)dev);
2976 } else {
2977 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
2978 (unsigned long)dev);
2979 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
2980 (unsigned long)dev);
2981 }
2982 }
2983
4b992628
KP
2984 priv->chan = kzalloc(sizeof(struct talitos_channel) *
2985 priv->num_channels, GFP_KERNEL);
2986 if (!priv->chan) {
2987 dev_err(dev, "failed to allocate channel management space\n");
9c4a7965
KP
2988 err = -ENOMEM;
2989 goto err_out;
2990 }
2991
f641dddd
MH
2992 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2993
c3e337f8 2994 for (i = 0; i < priv->num_channels; i++) {
5fa7fa14 2995 priv->chan[i].reg = priv->reg + stride * (i + 1);
2cdba3cf 2996 if (!priv->irq[1] || !(i & 1))
c3e337f8 2997 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
ad42d5fc 2998
4b992628
KP
2999 spin_lock_init(&priv->chan[i].head_lock);
3000 spin_lock_init(&priv->chan[i].tail_lock);
9c4a7965 3001
4b992628
KP
3002 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
3003 priv->fifo_len, GFP_KERNEL);
3004 if (!priv->chan[i].fifo) {
9c4a7965
KP
3005 dev_err(dev, "failed to allocate request fifo %d\n", i);
3006 err = -ENOMEM;
3007 goto err_out;
3008 }
9c4a7965 3009
4b992628
KP
3010 atomic_set(&priv->chan[i].submit_count,
3011 -(priv->chfifo_len - 1));
f641dddd 3012 }
9c4a7965 3013
81eb024c
KP
3014 dma_set_mask(dev, DMA_BIT_MASK(36));
3015
9c4a7965
KP
3016 /* reset and initialize the h/w */
3017 err = init_device(dev);
3018 if (err) {
3019 dev_err(dev, "failed to initialize device\n");
3020 goto err_out;
3021 }
3022
3023 /* register the RNG, if available */
3024 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3025 err = talitos_register_rng(dev);
3026 if (err) {
3027 dev_err(dev, "failed to register hwrng: %d\n", err);
3028 goto err_out;
3029 } else
3030 dev_info(dev, "hwrng\n");
3031 }
3032
3033 /* register crypto algorithms the device supports */
9c4a7965
KP
3034 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3035 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3036 struct talitos_crypto_alg *t_alg;
aeb4c132 3037 struct crypto_alg *alg = NULL;
9c4a7965
KP
3038
3039 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3040 if (IS_ERR(t_alg)) {
3041 err = PTR_ERR(t_alg);
0b2730d8 3042 if (err == -ENOTSUPP)
79b3a418 3043 continue;
9c4a7965
KP
3044 goto err_out;
3045 }
3046
acbf7c62
LN
3047 switch (t_alg->algt.type) {
3048 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62
LN
3049 err = crypto_register_alg(
3050 &t_alg->algt.alg.crypto);
aeb4c132 3051 alg = &t_alg->algt.alg.crypto;
acbf7c62 3052 break;
aeb4c132
HX
3053
3054 case CRYPTO_ALG_TYPE_AEAD:
3055 err = crypto_register_aead(
3056 &t_alg->algt.alg.aead);
3057 alg = &t_alg->algt.alg.aead.base;
3058 break;
3059
acbf7c62
LN
3060 case CRYPTO_ALG_TYPE_AHASH:
3061 err = crypto_register_ahash(
3062 &t_alg->algt.alg.hash);
aeb4c132 3063 alg = &t_alg->algt.alg.hash.halg.base;
acbf7c62
LN
3064 break;
3065 }
9c4a7965
KP
3066 if (err) {
3067 dev_err(dev, "%s alg registration failed\n",
aeb4c132 3068 alg->cra_driver_name);
9c4a7965 3069 kfree(t_alg);
991155ba 3070 } else
9c4a7965 3071 list_add_tail(&t_alg->entry, &priv->alg_list);
9c4a7965
KP
3072 }
3073 }
5b859b6e
KP
3074 if (!list_empty(&priv->alg_list))
3075 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3076 (char *)of_get_property(np, "compatible", NULL));
9c4a7965
KP
3077
3078 return 0;
3079
3080err_out:
3081 talitos_remove(ofdev);
9c4a7965
KP
3082
3083 return err;
3084}
3085
6c3f975a 3086static const struct of_device_id talitos_match[] = {
0635b7db
LC
3087#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3088 {
3089 .compatible = "fsl,sec1.0",
3090 },
3091#endif
3092#ifdef CONFIG_CRYPTO_DEV_TALITOS2
9c4a7965
KP
3093 {
3094 .compatible = "fsl,sec2.0",
3095 },
0635b7db 3096#endif
9c4a7965
KP
3097 {},
3098};
3099MODULE_DEVICE_TABLE(of, talitos_match);
3100
1c48a5c9 3101static struct platform_driver talitos_driver = {
4018294b
GL
3102 .driver = {
3103 .name = "talitos",
4018294b
GL
3104 .of_match_table = talitos_match,
3105 },
9c4a7965 3106 .probe = talitos_probe,
596f1034 3107 .remove = talitos_remove,
9c4a7965
KP
3108};
3109
741e8c2d 3110module_platform_driver(talitos_driver);
9c4a7965
KP
3111
3112MODULE_LICENSE("GPL");
3113MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3114MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");