Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
[deliverable/linux.git] / drivers / crypto / talitos.c
CommitLineData
9c4a7965
KP
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
5228f0f7 4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
9c4a7965
KP
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
57
922f9dc8
LC
58static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
81eb024c 60{
edc6bd69 61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
922f9dc8
LC
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
81eb024c
KP
64}
65
340ff60a
HG
66static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
68{
69 dst_ptr->ptr = src_ptr->ptr;
70 if (!is_sec1)
71 dst_ptr->eptr = src_ptr->eptr;
72}
73
42e8b0d7 74static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
922f9dc8 75 bool is_sec1)
538caf83 76{
922f9dc8
LC
77 if (is_sec1) {
78 ptr->res = 0;
79 ptr->len1 = cpu_to_be16(len);
80 } else {
81 ptr->len = cpu_to_be16(len);
82 }
538caf83
LC
83}
84
922f9dc8
LC
85static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86 bool is_sec1)
538caf83 87{
922f9dc8
LC
88 if (is_sec1)
89 return be16_to_cpu(ptr->len1);
90 else
91 return be16_to_cpu(ptr->len);
538caf83
LC
92}
93
922f9dc8 94static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
185eb79f 95{
922f9dc8
LC
96 if (!is_sec1)
97 ptr->j_extent = 0;
185eb79f
LC
98}
99
9c4a7965
KP
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	/*
	 * NOTE(review): dma_map_single() result is not checked with
	 * dma_mapping_error(); a failed mapping would be programmed into
	 * the descriptor silently — confirm against DMA-API expectations.
	 */
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* fill in length, address and clear the extent byte (SEC2+) */
	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}
116
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * NOTE(review): only the low 32 bits (ptr->ptr) are passed back to
	 * dma_unmap_single(); on 36-bit configurations the eptr bits are
	 * dropped here — verify this is benign on the target platforms.
	 */
	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
130
/*
 * Reset one SEC channel and re-enable its writeback/IRQ settings.
 * Returns 0 on success, -EIO if the reset bit never self-clears.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	/* SEC1 and SEC2+ use different registers/bits for channel reset */
	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		/* busy-wait for the h/w to clear the reset bit */
		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
169
/*
 * Issue a software reset of the whole SEC block and, on dual-IRQ parts,
 * restore the channel-to-IRQ assignment afterwards.
 * Returns 0 on success, -EIO if the SWR bit never self-clears.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	/* busy-wait for hardware to clear the software-reset bit */
	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	/* secondary IRQ present: route channels 1/3 to it via RCA bits */
	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	/* timeout check is deliberately after the RCA write above */
	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
195
/*
 * Reset and initialize the device
 *
 * Performs a double master reset (per errata), resets every channel,
 * then programs the interrupt-mask registers. Note that SEC1's IMR has
 * inverted polarity relative to SEC2+ (clrbits32 enables on SEC1).
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
244
245/**
246 * talitos_submit - submits a descriptor to the device for processing
247 * @dev: the SEC device to be used
5228f0f7 248 * @ch: the SEC device channel to be used
9c4a7965
KP
249 * @desc: the descriptor to be processed by the device
250 * @callback: whom to call when processing is complete
251 * @context: a handle for use by caller (optional)
252 *
253 * desc must contain valid dma-mapped (bus physical) address pointers.
254 * callback must check err and feedback in descriptor header
255 * for device processing status.
256 */
865d5061
HG
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count doubles as a "fifo full" gate: 0 means no slots */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/* SEC1 consumes the descriptor starting at hdr1 */
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish the slot only after all of its fields are written */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	/* async API: completion is reported through the callback */
	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
9c4a7965
KP
313
/*
 * process what was done, notify callback of error if not
 *
 * Walks the channel fifo from tail, completing every descriptor whose
 * DONE bit is set (status 0) and, when @error is nonzero, also failing
 * descriptors that are not done. The tail lock is dropped around each
 * callback invocation, so the tail index must be re-read afterwards.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		/* SEC1 writes back status into hdr1, SEC2+ into hdr */
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
376
377/*
378 * process completed requests for channels that have done status
379 */
dd3c0987
LC
/*
 * SEC1 "done" tasklet generator: flushes every channel whose done bit is
 * set in ch_done_mask, then unmasks done interrupts again (SEC1 IMR has
 * inverted polarity, so clrbits32() unmasks). The magic bit positions
 * below are the SEC1 per-channel done bits (29, 31, 17, 19).
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
408
/*
 * SEC2+ "done" tasklet generator: same structure as the SEC1 variant but
 * with SEC2 bit layout (even bits are per-channel done) and normal IMR
 * polarity (setbits32() unmasks).
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
9c4a7965
KP
439
/*
 * locate current (offending) descriptor
 *
 * Reads the channel's current-descriptor pointer register and searches
 * the s/w fifo (starting at tail) for the matching dma address.
 * Returns that descriptor's hdr word, or 0 if it cannot be found.
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	/* CDPR/CDPR_LO together hold the 64-bit current descriptor address */
	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	/* linear scan of the circular fifo; stop after one full lap */
	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
470
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Decodes the primary (SEL0) and secondary (SEL1) execution-unit selector
 * bits of the failed descriptor header and dumps that unit's interrupt
 * status registers, followed by the channel's descriptor buffer.
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	/* no header located? fall back to the channel's descriptor buffer */
	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		/*
		 * NOTE(review): the KEU case reads the PKEU register block
		 * — confirm against the SEC reference manual whether KEU
		 * shares PKEU's status registers or this is a copy-paste.
		 */
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	/* dump the 8 dwords of the channel's descriptor buffer */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
545
/*
 * recover from error interrupts
 *
 * For each channel flagged in @isr: decode CCPSR_LO, flush the channel,
 * then either reset it (SEC1, or fatal conditions) or ask SEC2+ h/w to
 * continue. Escalates to a full device re-init on restart failure or on
 * non-channel error bits.
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	/*
	 * NOTE(review): timeout is initialized once and shared across the
	 * channel loop; a second channel's continuation wait starts from
	 * the already-decremented count — confirm this is acceptable.
	 */
	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointeur not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* SEC2+: request continuation and wait for ack */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	/* any non-channel error bit, or a failed restart, forces re-init */
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
644
dd3c0987
LC
/*
 * SEC1 hard-IRQ handler generator: ack the interrupt, dispatch errors to
 * talitos_error(), otherwise mask done interrupts (SEC1 IMR polarity:
 * setbits32() masks) and defer completion work to the done tasklet.
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
679
/*
 * SEC2+ hard-IRQ handler generator: same flow as the SEC1 variant but
 * with normal IMR polarity (clrbits32() masks done interrupts).
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
9c4a7965
KP
718
/*
 * hwrng
 */
/*
 * Poll the RNGU output-fifo-level field; when @wait, retry up to 20
 * times with 10us delays. Returns nonzero when data is available.
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}
739
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	/*
	 * Both halves must be read to pop the fifo entry; the first read's
	 * value is deliberately overwritten — only the low word is returned.
	 */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
751
/*
 * Software-reset the RNGU and wait for its reset-done bit, then kick off
 * continuous generation. Returns 0 on success, -ENODEV on reset timeout.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
773
774static int talitos_register_rng(struct device *dev)
775{
776 struct talitos_private *priv = dev_get_drvdata(dev);
35a3bb3d 777 int err;
9c4a7965
KP
778
779 priv->rng.name = dev_driver_string(dev),
780 priv->rng.init = talitos_rng_init,
781 priv->rng.data_present = talitos_rng_data_present,
782 priv->rng.data_read = talitos_rng_data_read,
783 priv->rng.priv = (unsigned long)dev;
784
35a3bb3d
AS
785 err = hwrng_register(&priv->rng);
786 if (!err)
787 priv->rng_registered = true;
788
789 return err;
9c4a7965
KP
790}
791
792static void talitos_unregister_rng(struct device *dev)
793{
794 struct talitos_private *priv = dev_get_drvdata(dev);
795
35a3bb3d
AS
796 if (!priv->rng_registered)
797 return;
798
9c4a7965 799 hwrng_unregister(&priv->rng);
35a3bb3d 800 priv->rng_registered = false;
9c4a7965
KP
801}
802
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY 3000
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

/* per-transform (tfm) context, shared by aead/ablkcipher/ahash algs */
struct talitos_ctx {
	struct device *dev;		/* owning SEC device */
	int ch;				/* channel assigned to this tfm */
	__be32 desc_hdr_template;	/* base descriptor header for the alg */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key followed by enc key */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total = authkeylen + enckeylen */
	unsigned int enckeylen;
	unsigned int authkeylen;
};
820
497f2e6b
LN
821#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
822#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
823
824struct talitos_ahash_req_ctx {
60f208d7 825 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
497f2e6b
LN
826 unsigned int hw_context_size;
827 u8 buf[HASH_MAX_BLOCK_SIZE];
828 u8 bufnext[HASH_MAX_BLOCK_SIZE];
60f208d7 829 unsigned int swinit;
497f2e6b
LN
830 unsigned int first;
831 unsigned int last;
832 unsigned int to_hash_later;
42e8b0d7 833 unsigned int nbuf;
497f2e6b
LN
834 struct scatterlist bufsl[2];
835 struct scatterlist *psrc;
836};
837
56af8cd4
LN
838static int aead_setkey(struct crypto_aead *authenc,
839 const u8 *key, unsigned int keylen)
9c4a7965
KP
840{
841 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
c306a98d 842 struct crypto_authenc_keys keys;
9c4a7965 843
c306a98d 844 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
9c4a7965
KP
845 goto badkey;
846
c306a98d 847 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
9c4a7965
KP
848 goto badkey;
849
c306a98d
MK
850 memcpy(ctx->key, keys.authkey, keys.authkeylen);
851 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
9c4a7965 852
c306a98d
MK
853 ctx->keylen = keys.authkeylen + keys.enckeylen;
854 ctx->enckeylen = keys.enckeylen;
855 ctx->authkeylen = keys.authkeylen;
9c4a7965
KP
856
857 return 0;
858
859badkey:
860 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
861 return -EINVAL;
862}
863
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	/* trailing storage: link tables on SEC2+, flat buffer on SEC1 */
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
893
4de9d0b5
LN
894static void talitos_sg_unmap(struct device *dev,
895 struct talitos_edesc *edesc,
896 struct scatterlist *src,
897 struct scatterlist *dst)
898{
899 unsigned int src_nents = edesc->src_nents ? : 1;
900 unsigned int dst_nents = edesc->dst_nents ? : 1;
901
902 if (src != dst) {
b8a011d4 903 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
4de9d0b5 904
497f2e6b 905 if (dst) {
b8a011d4 906 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
497f2e6b 907 }
4de9d0b5 908 } else
b8a011d4 909 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
4de9d0b5
LN
910}
911
9c4a7965 912static void ipsec_esp_unmap(struct device *dev,
56af8cd4 913 struct talitos_edesc *edesc,
9c4a7965
KP
914 struct aead_request *areq)
915{
916 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
917 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
918 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
919 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
920
4de9d0b5 921 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
9c4a7965
KP
922
923 if (edesc->dma_len)
924 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
925 DMA_BIDIRECTIONAL);
926}
927
/*
 * ipsec_esp descriptor callbacks
 */
/*
 * Encrypt completion: unmap the request, copy the out-of-line ICV (which
 * the h/w wrote past the link tables) into the tail of dst, free the
 * edesc and complete the aead request with @err.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* recover the edesc from the embedded h/w descriptor */
	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		/* ICV lives just past the src+dst link tables (+2 slots) */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
959
/*
 * AEAD decrypt completion callback for the software-auth-check path:
 * compare the ICV computed by the h/w against the one stashed from the
 * incoming packet (constant-time), and fail with -EBADMSG on mismatch.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			/* stashed ICV follows the src+dst+2 link table entries */
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		/* crypto_memneq avoids timing leaks on the comparison */
		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
995
fe5720e2 996static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
e938e465
KP
997 struct talitos_desc *desc,
998 void *context, int err)
fe5720e2
KP
999{
1000 struct aead_request *req = context;
19bbbc63
KP
1001 struct talitos_edesc *edesc;
1002
1003 edesc = container_of(desc, struct talitos_edesc, desc);
fe5720e2
KP
1004
1005 ipsec_esp_unmap(dev, edesc, req);
1006
1007 /* check ICV auth status */
e938e465
KP
1008 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1009 DESC_HDR_LO_ICCR1_PASS))
1010 err = -EBADMSG;
fe5720e2
KP
1011
1012 kfree(edesc);
1013
1014 aead_request_complete(req, err);
1015}
1016
9c4a7965
KP
1017/*
1018 * convert scatterlist to SEC h/w link table format
1019 * stop at cryptlen bytes
1020 */
aeb4c132
HX
1021static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1022 unsigned int offset, int cryptlen,
1023 struct talitos_ptr *link_tbl_ptr)
9c4a7965 1024{
70bcaca7 1025 int n_sg = sg_count;
aeb4c132 1026 int count = 0;
70bcaca7 1027
aeb4c132
HX
1028 while (cryptlen && sg && n_sg--) {
1029 unsigned int len = sg_dma_len(sg);
9c4a7965 1030
aeb4c132
HX
1031 if (offset >= len) {
1032 offset -= len;
1033 goto next;
1034 }
1035
1036 len -= offset;
1037
1038 if (len > cryptlen)
1039 len = cryptlen;
1040
1041 to_talitos_ptr(link_tbl_ptr + count,
1042 sg_dma_address(sg) + offset, 0);
1043 link_tbl_ptr[count].len = cpu_to_be16(len);
1044 link_tbl_ptr[count].j_extent = 0;
1045 count++;
1046 cryptlen -= len;
1047 offset = 0;
1048
1049next:
1050 sg = sg_next(sg);
70bcaca7 1051 }
9c4a7965
KP
1052
1053 /* tag end of link table */
aeb4c132
HX
1054 if (count > 0)
1055 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
70bcaca7 1056
aeb4c132
HX
1057 return count;
1058}
1059
/* whole-list conversion: sg_to_link_tbl_offset() starting at offset 0 */
static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen, link_tbl_ptr);
}
1067
/*
 * fill in and submit ipsec_esp descriptor
 *
 * Populates the seven h/w descriptor pointers for an AEAD operation
 * (hmac key, hmac data, iv, cipher key, cipher in, cipher out, iv out),
 * building s/g link tables in edesc->link_tbl where a list has more
 * than one segment, then submits to channel ctx->ch.  On any error
 * other than -EINPROGRESS the edesc is unmapped and freed here.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;	/* next free slot in edesc->link_tbl */
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
						       : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		/* AD spans several segments: point ptr[1] at a link table */
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key (follows the auth key inside ctx->key) */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	/* h/w ICV check (CICV) needs to read the ICV after the ciphertext */
	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		/* collapsed to one entry: copy it directly into ptr[4] */
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
		    sg_to_link_tbl_offset(areq->dst, sg_count,
					  areq->assoclen, cryptlen,
					  &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;	/* previous entry no longer last */
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + edesc->dst_nents +
				2) * sizeof(struct talitos_ptr) +
			       authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		/* completion handler must copy the ICV back to dst */
		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1209
/*
 * allocate and map the extended descriptor
 *
 * Sizes the edesc trailer: on SEC2+ it holds (src+dst+2) link table
 * entries plus room for two ICVs (stashed and generated); on SEC1 it is
 * a bounce buffer of up to cryptlen per direction.  src_nents/dst_nents
 * are normalized to 0 when the list is a single segment.  The IV, if
 * any, is DMA-mapped here and unmapped on the error path.
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
	void *err;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		/* in-place: one list covers AD + payload + ICV */
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		/* decrypt reads the ICV from src; encrypt writes it to dst */
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						  (encrypt ? 0 : authsize));
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						  (encrypt ? authsize : 0));
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* GFP_DMA: descriptor must be reachable by the SEC's DMA engine */
	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		err = ERR_PTR(-ENOMEM);
		goto error_sg;
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
error_sg:
	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return err;
}
1313
79fd31d3 1314static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
62293a37 1315 int icv_stashing, bool encrypt)
4de9d0b5
LN
1316{
1317 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
aeb4c132 1318 unsigned int authsize = crypto_aead_authsize(authenc);
4de9d0b5 1319 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
79fd31d3 1320 unsigned int ivsize = crypto_aead_ivsize(authenc);
4de9d0b5 1321
aeb4c132 1322 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1323 iv, areq->assoclen, areq->cryptlen,
aeb4c132 1324 authsize, ivsize, icv_stashing,
62293a37 1325 areq->base.flags, encrypt);
4de9d0b5
LN
1326}
1327
56af8cd4 1328static int aead_encrypt(struct aead_request *req)
9c4a7965
KP
1329{
1330 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1331 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1332 struct talitos_edesc *edesc;
9c4a7965
KP
1333
1334 /* allocate extended descriptor */
62293a37 1335 edesc = aead_edesc_alloc(req, req->iv, 0, true);
9c4a7965
KP
1336 if (IS_ERR(edesc))
1337 return PTR_ERR(edesc);
1338
1339 /* set encrypt */
70bcaca7 1340 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965 1341
aeb4c132 1342 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
9c4a7965
KP
1343}
1344
/*
 * AEAD decrypt entry point: choose between letting the SEC check the
 * ICV in hardware (when the h/w supports it for this request shape) or
 * stashing the incoming ICV for a software compare on completion.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* cryptlen includes the ICV on input; operate on ciphertext only */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
1393
4de9d0b5
LN
1394static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1395 const u8 *key, unsigned int keylen)
1396{
1397 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
4de9d0b5
LN
1398
1399 memcpy(&ctx->key, key, keylen);
1400 ctx->keylen = keylen;
1401
1402 return 0;
4de9d0b5
LN
1403}
1404
/*
 * Undo the data in/out mappings made by map_sg_in/out_talitos_ptr().
 * On SEC1 this also copies the result back from the bounce buffer
 * (edesc->buf + len) into the dst scatterlist, since SEC1 has no
 * link tables.
 */
static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		/* src was DMA-mapped directly only in the single-segment case */
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			/* multi-segment dst: copy back from the bounce buffer */
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}
1431
/* release all DMA mappings made by common_nonsnoop() for an ablkcipher op */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	/* iv out [5] */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	/* cipher key [2] and cipher iv [1] */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1446
1447static void ablkcipher_done(struct device *dev,
1448 struct talitos_desc *desc, void *context,
1449 int err)
1450{
1451 struct ablkcipher_request *areq = context;
19bbbc63
KP
1452 struct talitos_edesc *edesc;
1453
1454 edesc = container_of(desc, struct talitos_edesc, desc);
4de9d0b5
LN
1455
1456 common_nonsnoop_unmap(dev, edesc, areq);
1457
1458 kfree(edesc);
1459
1460 areq->base.complete(&areq->base, err);
1461}
1462
032d197e
LC
1463int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1464 unsigned int len, struct talitos_edesc *edesc,
1465 enum dma_data_direction dir, struct talitos_ptr *ptr)
1466{
1467 int sg_count;
922f9dc8
LC
1468 struct talitos_private *priv = dev_get_drvdata(dev);
1469 bool is_sec1 = has_ftr_sec1(priv);
032d197e 1470
922f9dc8 1471 to_talitos_ptr_len(ptr, len, is_sec1);
032d197e 1472
6f65f6ac
LC
1473 if (is_sec1) {
1474 sg_count = edesc->src_nents ? : 1;
032d197e 1475
6f65f6ac
LC
1476 if (sg_count == 1) {
1477 dma_map_sg(dev, src, 1, dir);
1478 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
032d197e 1479 } else {
6f65f6ac
LC
1480 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1481 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1482 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1483 len, DMA_TO_DEVICE);
1484 }
1485 } else {
1486 to_talitos_ptr_extent_clear(ptr, is_sec1);
1487
b8a011d4 1488 sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
6f65f6ac
LC
1489
1490 if (sg_count == 1) {
922f9dc8 1491 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
6f65f6ac
LC
1492 } else {
1493 sg_count = sg_to_link_tbl(src, sg_count, len,
1494 &edesc->link_tbl[0]);
1495 if (sg_count > 1) {
1496 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1497 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1498 dma_sync_single_for_device(dev,
1499 edesc->dma_link_tbl,
1500 edesc->dma_len,
1501 DMA_BIDIRECTIONAL);
1502 } else {
1503 /* Only one segment now, so no link tbl needed*/
1504 to_talitos_ptr(ptr, sg_dma_address(src),
1505 is_sec1);
1506 }
032d197e
LC
1507 }
1508 }
1509 return sg_count;
1510}
1511
1512void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1513 unsigned int len, struct talitos_edesc *edesc,
1514 enum dma_data_direction dir,
1515 struct talitos_ptr *ptr, int sg_count)
1516{
922f9dc8
LC
1517 struct talitos_private *priv = dev_get_drvdata(dev);
1518 bool is_sec1 = has_ftr_sec1(priv);
1519
032d197e 1520 if (dir != DMA_NONE)
b8a011d4 1521 sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
032d197e 1522
6f65f6ac
LC
1523 to_talitos_ptr_len(ptr, len, is_sec1);
1524
1525 if (is_sec1) {
1526 if (sg_count == 1) {
1527 if (dir != DMA_NONE)
1528 dma_map_sg(dev, dst, 1, dir);
1529 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1530 } else {
1531 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1532 dma_sync_single_for_device(dev,
1533 edesc->dma_link_tbl + len,
1534 len, DMA_FROM_DEVICE);
1535 }
032d197e 1536 } else {
6f65f6ac
LC
1537 to_talitos_ptr_extent_clear(ptr, is_sec1);
1538
1539 if (sg_count == 1) {
1540 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1541 } else {
1542 struct talitos_ptr *link_tbl_ptr =
1543 &edesc->link_tbl[edesc->src_nents + 1];
1544
1545 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1546 (edesc->src_nents + 1) *
1547 sizeof(struct talitos_ptr), 0);
1548 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
42e8b0d7 1549 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
6f65f6ac
LC
1550 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1551 edesc->dma_len,
1552 DMA_BIDIRECTIONAL);
1553 }
032d197e
LC
1554 }
1555}
1556
/*
 * fill in and submit an ablkcipher descriptor (non-snooping template):
 * ptr[1] iv in, ptr[2] key, ptr[3] data in, ptr[4] data out, ptr[5] iv
 * out; ptr[0] and ptr[6] are unused.  On any error other than
 * -EINPROGRESS the edesc is unmapped and freed here.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out (DMA_NONE: in-place, reuse src's mapping) */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1613
e938e465 1614static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
62293a37 1615 areq, bool encrypt)
4de9d0b5
LN
1616{
1617 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1618 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
79fd31d3 1619 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1620
aeb4c132 1621 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1622 areq->info, 0, areq->nbytes, 0, ivsize, 0,
62293a37 1623 areq->base.flags, encrypt);
4de9d0b5
LN
1624}
1625
1626static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1627{
1628 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1629 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1630 struct talitos_edesc *edesc;
1631
1632 /* allocate extended descriptor */
62293a37 1633 edesc = ablkcipher_edesc_alloc(areq, true);
4de9d0b5
LN
1634 if (IS_ERR(edesc))
1635 return PTR_ERR(edesc);
1636
1637 /* set encrypt */
1638 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1639
febec542 1640 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1641}
1642
1643static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1644{
1645 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1646 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1647 struct talitos_edesc *edesc;
1648
1649 /* allocate extended descriptor */
62293a37 1650 edesc = ablkcipher_edesc_alloc(areq, false);
4de9d0b5
LN
1651 if (IS_ERR(edesc))
1652 return PTR_ERR(edesc);
1653
1654 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1655
febec542 1656 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1657}
1658
/* release all DMA mappings made by common_nonsnoop_hash() */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* hash/context out [5] */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	/* HMAC key [2] is mapped only when a key was set */
	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}
1685
/*
 * ahash completion callback: carry over any partial block saved in
 * bufnext so the next update/final/finup starts from it, then unmap,
 * free and complete.
 */
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
1706
2d02905e
LC
1707/*
1708 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1709 * ourself and submit a padded block
1710 */
1711void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1712 struct talitos_edesc *edesc,
1713 struct talitos_ptr *ptr)
1714{
1715 static u8 padded_hash[64] = {
1716 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1717 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1718 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1719 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1720 };
1721
1722 pr_err_once("Bug in SEC1, padding ourself\n");
1723 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1724 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1725 (char *)padded_hash, DMA_TO_DEVICE);
1726}
1727
/*
 * fill in and submit an ahash descriptor: ptr[1] hash context in (when
 * continuing or s/w-initialized), ptr[2] HMAC key (if any), ptr[3] data
 * in, ptr[5] digest out (last) or context out (intermediate).  On any
 * error other than -EINPROGRESS the edesc is unmapped and freed here.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* SEC1 can't hash a zero-length message: substitute a padded block */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1798
1799static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1800 unsigned int nbytes)
1801{
1802 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1803 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1804 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1805
aeb4c132 1806 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
62293a37 1807 nbytes, 0, 0, 0, areq->base.flags, false);
497f2e6b
LN
1808}
1809
1810static int ahash_init(struct ahash_request *areq)
1811{
1812 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1813 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1814
1815 /* Initialize the context */
5e833bc4 1816 req_ctx->nbuf = 0;
60f208d7
KP
1817 req_ctx->first = 1; /* first indicates h/w must init its context */
1818 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1819 req_ctx->hw_context_size =
1820 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1821 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1822 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1823
1824 return 0;
1825}
1826
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	/* SHA-224 initial hash values (differ from SHA-256's) */
	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}
1853
497f2e6b
LN
/*
 * Core of update/final/finup/digest: decide how much of the request to
 * hash now (hardware wants whole blocks except on the final descriptor),
 * stash any remainder for the next call, chain in previously buffered
 * bytes, and submit an extended descriptor to the SEC.
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;	/* final: hardware pads, hash it all */
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		/* Save the tail of src for the next descriptor */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
1946static int ahash_update(struct ahash_request *areq)
1947{
1948 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1949
1950 req_ctx->last = 0;
1951
1952 return ahash_process_req(areq, areq->nbytes);
1953}
1954
1955static int ahash_final(struct ahash_request *areq)
1956{
1957 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1958
1959 req_ctx->last = 1;
1960
1961 return ahash_process_req(areq, 0);
1962}
1963
1964static int ahash_finup(struct ahash_request *areq)
1965{
1966 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1967
1968 req_ctx->last = 1;
1969
1970 return ahash_process_req(areq, areq->nbytes);
1971}
1972
1973static int ahash_digest(struct ahash_request *areq)
1974{
1975 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 1976 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 1977
60f208d7 1978 ahash->init(areq);
497f2e6b
LN
1979 req_ctx->last = 1;
1980
1981 return ahash_process_req(areq, areq->nbytes);
1982}
1983
79b3a418
LN
1984struct keyhash_result {
1985 struct completion completion;
1986 int err;
1987};
1988
1989static void keyhash_complete(struct crypto_async_request *req, int err)
1990{
1991 struct keyhash_result *res = req->data;
1992
1993 if (err == -EINPROGRESS)
1994 return;
1995
1996 res->err = err;
1997 complete(&res->completion);
1998}
1999
2000static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2001 u8 *hash)
2002{
2003 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2004
2005 struct scatterlist sg[1];
2006 struct ahash_request *req;
2007 struct keyhash_result hresult;
2008 int ret;
2009
2010 init_completion(&hresult.completion);
2011
2012 req = ahash_request_alloc(tfm, GFP_KERNEL);
2013 if (!req)
2014 return -ENOMEM;
2015
2016 /* Keep tfm keylen == 0 during hash of the long key */
2017 ctx->keylen = 0;
2018 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2019 keyhash_complete, &hresult);
2020
2021 sg_init_one(&sg[0], key, keylen);
2022
2023 ahash_request_set_crypt(req, sg, hash, keylen);
2024 ret = crypto_ahash_digest(req);
2025 switch (ret) {
2026 case 0:
2027 break;
2028 case -EINPROGRESS:
2029 case -EBUSY:
2030 ret = wait_for_completion_interruptible(
2031 &hresult.completion);
2032 if (!ret)
2033 ret = hresult.err;
2034 break;
2035 default:
2036 break;
2037 }
2038 ahash_request_free(req);
2039
2040 return ret;
2041}
2042
2043static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2044 unsigned int keylen)
2045{
2046 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2047 unsigned int blocksize =
2048 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2049 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2050 unsigned int keysize = keylen;
2051 u8 hash[SHA512_DIGEST_SIZE];
2052 int ret;
2053
2054 if (keylen <= blocksize)
2055 memcpy(ctx->key, key, keysize);
2056 else {
2057 /* Must get the hash of the long key */
2058 ret = keyhash(tfm, key, keylen, hash);
2059
2060 if (ret) {
2061 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2062 return -EINVAL;
2063 }
2064
2065 keysize = digestsize;
2066 memcpy(ctx->key, hash, digestsize);
2067 }
2068
2069 ctx->keylen = keysize;
2070
2071 return 0;
2072}
2073
2074
9c4a7965 2075struct talitos_alg_template {
d5e4aaef
LN
2076 u32 type;
2077 union {
2078 struct crypto_alg crypto;
acbf7c62 2079 struct ahash_alg hash;
aeb4c132 2080 struct aead_alg aead;
d5e4aaef 2081 } alg;
9c4a7965
KP
2082 __be32 desc_hdr_template;
2083};
2084
/*
 * Master table of algorithms the driver may offer.  Entries are only
 * registered at probe time when hw_supports() confirms the device
 * provides the descriptor type and execution units each one needs.
 */
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		/* SHA384/512 use message digest unit B */
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	/* HMAC variants of the AHASH algorithms above. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};
/* Per-registered-algorithm bookkeeping allocated by talitos_alg_alloc(). */
struct talitos_crypto_alg {
	struct list_head entry;		/* link in talitos_private.alg_list */
	struct device *dev;		/* SEC device owning this algorithm */
	struct talitos_alg_template algt;	/* private copy of template */
};
89d124cb
JE
2646static int talitos_init_common(struct talitos_ctx *ctx,
2647 struct talitos_crypto_alg *talitos_alg)
9c4a7965 2648{
5228f0f7 2649 struct talitos_private *priv;
9c4a7965
KP
2650
2651 /* update context with ptr to dev */
2652 ctx->dev = talitos_alg->dev;
19bbbc63 2653
5228f0f7
KP
2654 /* assign SEC channel to tfm in round-robin fashion */
2655 priv = dev_get_drvdata(ctx->dev);
2656 ctx->ch = atomic_inc_return(&priv->last_chan) &
2657 (priv->num_channels - 1);
2658
9c4a7965 2659 /* copy descriptor header template value */
acbf7c62 2660 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
9c4a7965 2661
602dba5a
KP
2662 /* select done notification */
2663 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2664
497f2e6b
LN
2665 return 0;
2666}
2667
89d124cb
JE
2668static int talitos_cra_init(struct crypto_tfm *tfm)
2669{
2670 struct crypto_alg *alg = tfm->__crt_alg;
2671 struct talitos_crypto_alg *talitos_alg;
2672 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2673
2674 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2675 talitos_alg = container_of(__crypto_ahash_alg(alg),
2676 struct talitos_crypto_alg,
2677 algt.alg.hash);
2678 else
2679 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2680 algt.alg.crypto);
2681
2682 return talitos_init_common(ctx, talitos_alg);
2683}
2684
aeb4c132 2685static int talitos_cra_init_aead(struct crypto_aead *tfm)
497f2e6b 2686{
89d124cb
JE
2687 struct aead_alg *alg = crypto_aead_alg(tfm);
2688 struct talitos_crypto_alg *talitos_alg;
2689 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2690
2691 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2692 algt.alg.aead);
2693
2694 return talitos_init_common(ctx, talitos_alg);
9c4a7965
KP
2695}
2696
497f2e6b
LN
2697static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2698{
2699 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2700
2701 talitos_cra_init(tfm);
2702
2703 ctx->keylen = 0;
2704 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2705 sizeof(struct talitos_ahash_req_ctx));
2706
2707 return 0;
2708}
2709
9c4a7965
KP
2710/*
2711 * given the alg's descriptor header template, determine whether descriptor
2712 * type and primary/secondary execution units required match the hw
2713 * capabilities description provided in the device tree node.
2714 */
2715static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2716{
2717 struct talitos_private *priv = dev_get_drvdata(dev);
2718 int ret;
2719
2720 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2721 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2722
2723 if (SECONDARY_EU(desc_hdr_template))
2724 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2725 & priv->exec_units);
2726
2727 return ret;
2728}
2729
2dc11581 2730static int talitos_remove(struct platform_device *ofdev)
9c4a7965
KP
2731{
2732 struct device *dev = &ofdev->dev;
2733 struct talitos_private *priv = dev_get_drvdata(dev);
2734 struct talitos_crypto_alg *t_alg, *n;
2735 int i;
2736
2737 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
acbf7c62
LN
2738 switch (t_alg->algt.type) {
2739 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62 2740 break;
aeb4c132
HX
2741 case CRYPTO_ALG_TYPE_AEAD:
2742 crypto_unregister_aead(&t_alg->algt.alg.aead);
acbf7c62
LN
2743 case CRYPTO_ALG_TYPE_AHASH:
2744 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2745 break;
2746 }
9c4a7965
KP
2747 list_del(&t_alg->entry);
2748 kfree(t_alg);
2749 }
2750
2751 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2752 talitos_unregister_rng(dev);
2753
35a3bb3d 2754 for (i = 0; priv->chan && i < priv->num_channels; i++)
0b798247 2755 kfree(priv->chan[i].fifo);
9c4a7965 2756
4b992628 2757 kfree(priv->chan);
9c4a7965 2758
c3e337f8 2759 for (i = 0; i < 2; i++)
2cdba3cf 2760 if (priv->irq[i]) {
c3e337f8
KP
2761 free_irq(priv->irq[i], dev);
2762 irq_dispose_mapping(priv->irq[i]);
2763 }
9c4a7965 2764
c3e337f8 2765 tasklet_kill(&priv->done_task[0]);
2cdba3cf 2766 if (priv->irq[1])
c3e337f8 2767 tasklet_kill(&priv->done_task[1]);
9c4a7965
KP
2768
2769 iounmap(priv->reg);
2770
9c4a7965
KP
2771 kfree(priv);
2772
2773 return 0;
2774}
2775
/*
 * Instantiate one driver_algs[] template: copy it, wire up the type-specific
 * operations, apply device-feature fallbacks (no-HMAC, sha224 sw-init) and
 * fill in the common crypto_alg fields.  Returns ERR_PTR on failure.
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							*template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		/* reject hmac algs on hardware that cannot do HMAC */
		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/* no hw sha224: seed context in software, run sha256 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
/*
 * Map and request the device interrupt line(s).  SEC1 and single-irq SEC2
 * parts use one handler for all four channels; dual-irq SEC2 parts split
 * channels 0/2 and 1/3 across two lines.  On failure the mapping is
 * disposed and priv->irq[] zeroed so talitos_remove() skips it.
 */
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		/* SEC1: a single line services all channels */
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		/* single-irq SEC2: one handler covers all four channels */
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
2903
1c48a5c9 2904static int talitos_probe(struct platform_device *ofdev)
9c4a7965
KP
2905{
2906 struct device *dev = &ofdev->dev;
61c7a080 2907 struct device_node *np = ofdev->dev.of_node;
9c4a7965
KP
2908 struct talitos_private *priv;
2909 const unsigned int *prop;
2910 int i, err;
5fa7fa14 2911 int stride;
9c4a7965
KP
2912
2913 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2914 if (!priv)
2915 return -ENOMEM;
2916
f3de9cb1
KH
2917 INIT_LIST_HEAD(&priv->alg_list);
2918
9c4a7965
KP
2919 dev_set_drvdata(dev, priv);
2920
2921 priv->ofdev = ofdev;
2922
511d63cb
HG
2923 spin_lock_init(&priv->reg_lock);
2924
9c4a7965
KP
2925 priv->reg = of_iomap(np, 0);
2926 if (!priv->reg) {
2927 dev_err(dev, "failed to of_iomap\n");
2928 err = -ENOMEM;
2929 goto err_out;
2930 }
2931
2932 /* get SEC version capabilities from device tree */
2933 prop = of_get_property(np, "fsl,num-channels", NULL);
2934 if (prop)
2935 priv->num_channels = *prop;
2936
2937 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2938 if (prop)
2939 priv->chfifo_len = *prop;
2940
2941 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2942 if (prop)
2943 priv->exec_units = *prop;
2944
2945 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2946 if (prop)
2947 priv->desc_types = *prop;
2948
2949 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2950 !priv->exec_units || !priv->desc_types) {
2951 dev_err(dev, "invalid property data in device tree node\n");
2952 err = -EINVAL;
2953 goto err_out;
2954 }
2955
f3c85bc1
LN
2956 if (of_device_is_compatible(np, "fsl,sec3.0"))
2957 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2958
fe5720e2 2959 if (of_device_is_compatible(np, "fsl,sec2.1"))
60f208d7 2960 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
79b3a418
LN
2961 TALITOS_FTR_SHA224_HWINIT |
2962 TALITOS_FTR_HMAC_OK;
fe5720e2 2963
21590888
LC
2964 if (of_device_is_compatible(np, "fsl,sec1.0"))
2965 priv->features |= TALITOS_FTR_SEC1;
2966
5fa7fa14
LC
2967 if (of_device_is_compatible(np, "fsl,sec1.2")) {
2968 priv->reg_deu = priv->reg + TALITOS12_DEU;
2969 priv->reg_aesu = priv->reg + TALITOS12_AESU;
2970 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2971 stride = TALITOS1_CH_STRIDE;
2972 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2973 priv->reg_deu = priv->reg + TALITOS10_DEU;
2974 priv->reg_aesu = priv->reg + TALITOS10_AESU;
2975 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2976 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2977 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2978 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2979 stride = TALITOS1_CH_STRIDE;
2980 } else {
2981 priv->reg_deu = priv->reg + TALITOS2_DEU;
2982 priv->reg_aesu = priv->reg + TALITOS2_AESU;
2983 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2984 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2985 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2986 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2987 priv->reg_keu = priv->reg + TALITOS2_KEU;
2988 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2989 stride = TALITOS2_CH_STRIDE;
2990 }
2991
dd3c0987
LC
2992 err = talitos_probe_irq(ofdev);
2993 if (err)
2994 goto err_out;
2995
2996 if (of_device_is_compatible(np, "fsl,sec1.0")) {
2997 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2998 (unsigned long)dev);
2999 } else {
3000 if (!priv->irq[1]) {
3001 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3002 (unsigned long)dev);
3003 } else {
3004 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3005 (unsigned long)dev);
3006 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3007 (unsigned long)dev);
3008 }
3009 }
3010
4b992628
KP
3011 priv->chan = kzalloc(sizeof(struct talitos_channel) *
3012 priv->num_channels, GFP_KERNEL);
3013 if (!priv->chan) {
3014 dev_err(dev, "failed to allocate channel management space\n");
9c4a7965
KP
3015 err = -ENOMEM;
3016 goto err_out;
3017 }
3018
f641dddd
MH
3019 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3020
c3e337f8 3021 for (i = 0; i < priv->num_channels; i++) {
5fa7fa14 3022 priv->chan[i].reg = priv->reg + stride * (i + 1);
2cdba3cf 3023 if (!priv->irq[1] || !(i & 1))
c3e337f8 3024 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
ad42d5fc 3025
4b992628
KP
3026 spin_lock_init(&priv->chan[i].head_lock);
3027 spin_lock_init(&priv->chan[i].tail_lock);
9c4a7965 3028
4b992628
KP
3029 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
3030 priv->fifo_len, GFP_KERNEL);
3031 if (!priv->chan[i].fifo) {
9c4a7965
KP
3032 dev_err(dev, "failed to allocate request fifo %d\n", i);
3033 err = -ENOMEM;
3034 goto err_out;
3035 }
9c4a7965 3036
4b992628
KP
3037 atomic_set(&priv->chan[i].submit_count,
3038 -(priv->chfifo_len - 1));
f641dddd 3039 }
9c4a7965 3040
81eb024c
KP
3041 dma_set_mask(dev, DMA_BIT_MASK(36));
3042
9c4a7965
KP
3043 /* reset and initialize the h/w */
3044 err = init_device(dev);
3045 if (err) {
3046 dev_err(dev, "failed to initialize device\n");
3047 goto err_out;
3048 }
3049
3050 /* register the RNG, if available */
3051 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3052 err = talitos_register_rng(dev);
3053 if (err) {
3054 dev_err(dev, "failed to register hwrng: %d\n", err);
3055 goto err_out;
3056 } else
3057 dev_info(dev, "hwrng\n");
3058 }
3059
3060 /* register crypto algorithms the device supports */
9c4a7965
KP
3061 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3062 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3063 struct talitos_crypto_alg *t_alg;
aeb4c132 3064 struct crypto_alg *alg = NULL;
9c4a7965
KP
3065
3066 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3067 if (IS_ERR(t_alg)) {
3068 err = PTR_ERR(t_alg);
0b2730d8 3069 if (err == -ENOTSUPP)
79b3a418 3070 continue;
9c4a7965
KP
3071 goto err_out;
3072 }
3073
acbf7c62
LN
3074 switch (t_alg->algt.type) {
3075 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62
LN
3076 err = crypto_register_alg(
3077 &t_alg->algt.alg.crypto);
aeb4c132 3078 alg = &t_alg->algt.alg.crypto;
acbf7c62 3079 break;
aeb4c132
HX
3080
3081 case CRYPTO_ALG_TYPE_AEAD:
3082 err = crypto_register_aead(
3083 &t_alg->algt.alg.aead);
3084 alg = &t_alg->algt.alg.aead.base;
3085 break;
3086
acbf7c62
LN
3087 case CRYPTO_ALG_TYPE_AHASH:
3088 err = crypto_register_ahash(
3089 &t_alg->algt.alg.hash);
aeb4c132 3090 alg = &t_alg->algt.alg.hash.halg.base;
acbf7c62
LN
3091 break;
3092 }
9c4a7965
KP
3093 if (err) {
3094 dev_err(dev, "%s alg registration failed\n",
aeb4c132 3095 alg->cra_driver_name);
9c4a7965 3096 kfree(t_alg);
991155ba 3097 } else
9c4a7965 3098 list_add_tail(&t_alg->entry, &priv->alg_list);
9c4a7965
KP
3099 }
3100 }
5b859b6e
KP
3101 if (!list_empty(&priv->alg_list))
3102 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3103 (char *)of_get_property(np, "compatible", NULL));
9c4a7965
KP
3104
3105 return 0;
3106
3107err_out:
3108 talitos_remove(ofdev);
9c4a7965
KP
3109
3110 return err;
3111}
3112
/*
 * Device-tree match table.  Each SEC major revision entry is guarded by
 * its Kconfig option so the driver only binds to engine types it was
 * built to support.
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
3127
/* Platform-driver glue: binds talitos_probe/talitos_remove via the OF table. */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

/* expands to the boilerplate module init/exit that (un)registers the driver */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
This page took 0.524362 seconds and 5 git commands to generate.