crypto: talitos - SEC1 bugs on 0 data hash
[deliverable/linux.git] / drivers / crypto / talitos.c
CommitLineData
9c4a7965
KP
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
5228f0f7 4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
9c4a7965
KP
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
5af50730
RH
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
9c4a7965
KP
37#include <linux/of_platform.h>
38#include <linux/dma-mapping.h>
39#include <linux/io.h>
40#include <linux/spinlock.h>
41#include <linux/rtnetlink.h>
5a0e3ad6 42#include <linux/slab.h>
9c4a7965
KP
43
44#include <crypto/algapi.h>
45#include <crypto/aes.h>
3952f17e 46#include <crypto/des.h>
9c4a7965 47#include <crypto/sha.h>
497f2e6b 48#include <crypto/md5.h>
9c4a7965
KP
49#include <crypto/aead.h>
50#include <crypto/authenc.h>
4de9d0b5 51#include <crypto/skcipher.h>
acbf7c62
LN
52#include <crypto/hash.h>
53#include <crypto/internal/hash.h>
4de9d0b5 54#include <crypto/scatterwalk.h>
9c4a7965
KP
55
56#include "talitos.h"
57
922f9dc8
LC
58static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
81eb024c 60{
edc6bd69 61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
922f9dc8
LC
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
81eb024c
KP
64}
65
922f9dc8
LC
66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len,
67 bool is_sec1)
538caf83 68{
922f9dc8
LC
69 if (is_sec1) {
70 ptr->res = 0;
71 ptr->len1 = cpu_to_be16(len);
72 } else {
73 ptr->len = cpu_to_be16(len);
74 }
538caf83
LC
75}
76
922f9dc8
LC
77static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
78 bool is_sec1)
538caf83 79{
922f9dc8
LC
80 if (is_sec1)
81 return be16_to_cpu(ptr->len1);
82 else
83 return be16_to_cpu(ptr->len);
538caf83
LC
84}
85
922f9dc8 86static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
185eb79f 87{
922f9dc8
LC
88 if (!is_sec1)
89 ptr->j_extent = 0;
185eb79f
LC
90}
91
9c4a7965
KP
92/*
93 * map virtual single (contiguous) pointer to h/w descriptor pointer
94 */
95static void map_single_talitos_ptr(struct device *dev,
edc6bd69 96 struct talitos_ptr *ptr,
9c4a7965 97 unsigned short len, void *data,
9c4a7965
KP
98 enum dma_data_direction dir)
99{
81eb024c 100 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
922f9dc8
LC
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
81eb024c 103
922f9dc8
LC
104 to_talitos_ptr_len(ptr, len, is_sec1);
105 to_talitos_ptr(ptr, dma_addr, is_sec1);
106 to_talitos_ptr_extent_clear(ptr, is_sec1);
9c4a7965
KP
107}
108
109/*
110 * unmap bus single (contiguous) h/w descriptor pointer
111 */
112static void unmap_single_talitos_ptr(struct device *dev,
edc6bd69 113 struct talitos_ptr *ptr,
9c4a7965
KP
114 enum dma_data_direction dir)
115{
922f9dc8
LC
116 struct talitos_private *priv = dev_get_drvdata(dev);
117 bool is_sec1 = has_ftr_sec1(priv);
118
edc6bd69 119 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
922f9dc8 120 from_talitos_ptr_len(ptr, is_sec1), dir);
9c4a7965
KP
121}
122
/*
 * Reset one SEC channel and re-enable its standard operating mode.
 * Returns 0 on success, -EIO if the reset bit never self-clears.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	/* SEC1 and SEC2+ place the channel-reset bit in different regs */
	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		/* poll until the h/w clears the reset bit, bounded by timeout */
		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
161
/*
 * Issue a software reset of the whole SEC block and wait for completion.
 * Returns 0 on success, -EIO if the SWR bit never self-clears.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	/* wait for the h/w to clear the software-reset bit */
	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	/* with a second IRQ line present, remap channels 1/3 to it */
	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
187
/*
 * Reset and initialize the device
 *
 * Performs a double master reset (see errata note below), resets every
 * channel, then unmasks the done/error interrupts. Note the inverted IMR
 * polarity on SEC1: bits are *cleared* to enable interrupts there, while
 * SEC2+ *sets* them.
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
236
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Returns -EINPROGRESS on successful submission, -EAGAIN if the channel
 * fifo is full.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count doubles as the fifo-full gate */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/* SEC1 descriptors start at hdr1 (no hdr/eptr words) */
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish the slot (non-NULL desc marks it in-flight) after the
	 * fields above are visible to other CPUs */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
9c4a7965
KP
305
/*
 * process what was done, notify callback of error if not
 *
 * Walks the channel fifo from the tail, completing every descriptor whose
 * DONE bit is set (status 0). When @error is set, descriptors without the
 * DONE bit are completed with @error instead. The tail lock is dropped
 * around each callback, so the tail is re-read after every iteration.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		/* SEC1 writes back into hdr1; SEC2+ into hdr */
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
368
/*
 * process completed requests for channels that have done status
 *
 * Tasklet body for SEC1: flush each channel flagged in ch_done_mask,
 * then re-enable (unmask) done interrupts. On SEC1 the IMR polarity is
 * inverted, so clrbits32() unmasks (cf. init_device()).
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
400
/*
 * Tasklet body for SEC2+: flush each channel flagged in ch_done_mask,
 * then re-enable done interrupts (setbits32() unmasks on SEC2+).
 * Instantiated once for single-IRQ parts (4ch) and once per IRQ line
 * for dual-IRQ parts (ch0_2 / ch1_3).
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
9c4a7965
KP
431
/*
 * locate current (offending) descriptor
 *
 * Reads the channel's current descriptor pointer (CDPR) and scans the
 * s/w fifo, starting at the tail, for the request with that bus address.
 * Returns the descriptor's header word, or 0 if it cannot be found.
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	/* wrap-around scan; give up if we come back to the tail */
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
462
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Decodes the primary (SEL0) and secondary (SEL1) execution-unit selection
 * from the descriptor header and dumps that unit's interrupt status
 * registers, followed by the channel's descriptor buffer.
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	/* fall back to the channel's descriptor buffer if no header given */
	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		/* NOTE(review): KEU case reads reg_pkeu — looks intentional
		 * in this driver version, but worth confirming against the
		 * SEC reference manual */
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	/* dump the 8 descriptor-buffer dwords for this channel */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
537
/*
 * recover from error interrupts
 *
 * For each channel flagged in @isr: decode and log the pending-status
 * causes, flush the channel with an error status, then either reset the
 * channel or ask it to continue (continuation is SEC2-only). If recovery
 * fails, or a non-channel error is flagged, the whole device is
 * re-initialized.
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v, v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointeur not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		/* the scatter/gather status bits only exist on SEC2+ */
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* SEC2 continuation: resume after the bad desc */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
637
dd3c0987
LC
/*
 * SEC1 hard-IRQ handler: acknowledge the interrupt, hand errors to
 * talitos_error(), and defer done-processing to the tasklet after
 * masking further done interrupts (the tasklet unmasks on exit).
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
672
/*
 * SEC2+ hard-IRQ handler: same structure as the SEC1 variant, but with
 * normal IMR polarity (clrbits32() masks done interrupts here).
 * Instantiated for the single-IRQ layout (4ch) and for each line of the
 * dual-IRQ layout (ch0_2 / ch1_3, each with its own tasklet index).
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
9c4a7965
KP
711
712/*
713 * hwrng
714 */
715static int talitos_rng_data_present(struct hwrng *rng, int wait)
716{
717 struct device *dev = (struct device *)rng->priv;
718 struct talitos_private *priv = dev_get_drvdata(dev);
719 u32 ofl;
720 int i;
721
722 for (i = 0; i < 20; i++) {
5fa7fa14 723 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
9c4a7965
KP
724 TALITOS_RNGUSR_LO_OFL;
725 if (ofl || !wait)
726 break;
727 udelay(10);
728 }
729
730 return !!ofl;
731}
732
/*
 * Pull one 32-bit word of random data from the RNGU fifo.
 * Both halves of the 64-bit fifo entry are read (required by the h/w,
 * per the comment below); the first read is intentionally overwritten
 * and only the low word is returned to the hwrng core.
 */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
744
/*
 * Reset the RNGU execution unit and start it generating.
 * Returns 0 on success, -ENODEV if the reset-done bit never appears.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	/* wait for reset-done, bounded by timeout */
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
766
767static int talitos_register_rng(struct device *dev)
768{
769 struct talitos_private *priv = dev_get_drvdata(dev);
770
771 priv->rng.name = dev_driver_string(dev),
772 priv->rng.init = talitos_rng_init,
773 priv->rng.data_present = talitos_rng_data_present,
774 priv->rng.data_read = talitos_rng_data_read,
775 priv->rng.priv = (unsigned long)dev;
776
777 return hwrng_register(&priv->rng);
778}
779
/* Unregister this device's hwrng instance from the hwrng core. */
static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
786
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY 3000
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

/*
 * per-transform context: device/channel binding, descriptor header
 * template, and key material (auth key followed by enc key for aead,
 * see aead_setkey()).
 */
struct talitos_ctx {
	struct device *dev;
	int ch;				/* SEC channel this tfm submits to */
	__be32 desc_hdr_template;	/* EU selection/mode bits for descs */
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total = authkeylen + enckeylen */
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;		/* ICV length, set by setauthsize */
};
805
497f2e6b
LN
#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

/*
 * per-request ahash state: the exported h/w hash context plus buffering
 * for partial blocks between update() calls.
 */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* pending partial-block bytes */
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;		/* s/w-provided initial context? */
	unsigned int first;		/* first descriptor of this hash */
	unsigned int last;		/* final() has been requested */
	unsigned int to_hash_later;	/* bytes deferred to next update */
	u64 nbuf;			/* number of valid bytes in buf */
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
822
56af8cd4
LN
823static int aead_setauthsize(struct crypto_aead *authenc,
824 unsigned int authsize)
9c4a7965
KP
825{
826 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
827
828 ctx->authsize = authsize;
829
830 return 0;
831}
832
56af8cd4
LN
833static int aead_setkey(struct crypto_aead *authenc,
834 const u8 *key, unsigned int keylen)
9c4a7965
KP
835{
836 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
c306a98d 837 struct crypto_authenc_keys keys;
9c4a7965 838
c306a98d 839 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
9c4a7965
KP
840 goto badkey;
841
c306a98d 842 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
9c4a7965
KP
843 goto badkey;
844
c306a98d
MK
845 memcpy(ctx->key, keys.authkey, keys.authkeylen);
846 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
9c4a7965 847
c306a98d
MK
848 ctx->keylen = keys.authkeylen + keys.enckeylen;
849 ctx->enckeylen = keys.enckeylen;
850 ctx->authkeylen = keys.authkeylen;
9c4a7965
KP
851
852 return 0;
853
854badkey:
855 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
856 return -EINVAL;
857}
858
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	/* trailing flexible area: link tables on SEC2+, flat buffer on SEC1 */
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
894
4de9d0b5
LN
895static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
896 unsigned int nents, enum dma_data_direction dir,
2a1cfe46 897 bool chained)
4de9d0b5
LN
898{
899 if (unlikely(chained))
900 while (sg) {
901 dma_map_sg(dev, sg, 1, dir);
5be4d4c9 902 sg = sg_next(sg);
4de9d0b5
LN
903 }
904 else
905 dma_map_sg(dev, sg, nents, dir);
906 return nents;
907}
908
909static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
910 enum dma_data_direction dir)
911{
912 while (sg) {
913 dma_unmap_sg(dev, sg, 1, dir);
5be4d4c9 914 sg = sg_next(sg);
4de9d0b5
LN
915 }
916}
917
/*
 * Unmap the src/dst scatterlists of a request, choosing the DMA direction
 * and unmap style (chained vs flat) to mirror how they were mapped:
 * distinct src/dst were mapped TO/FROM device, shared src==dst was
 * mapped bidirectionally.
 */
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	/* nents of 0 encodes "single segment" — unmap exactly one entry */
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
946
/*
 * Undo all DMA mappings made for an ipsec_esp descriptor:
 * single-mapped pointers (iv out, cipher key, cipher iv, hmac key),
 * the assoc scatterlist, the src/dst data, and the link-table area.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
970
971/*
972 * ipsec_esp descriptor callbacks
973 */
/*
 * Completion callback for AEAD encryption: unmap the descriptor and,
 * when dst was scattered, copy the h/w-generated ICV (stashed after the
 * link tables) to the tail of the destination scatterlist.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the h/w descriptor is embedded in the edesc */
	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		/* ICV lives right after the src/dst/assoc link tables */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
1003
/*
 * Completion callback for AEAD decryption when the ICV must be verified
 * in software: compare the ICV computed by the h/w against the one that
 * was stashed from the incoming request by aead_decrypt().
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		/* stashed ICV location mirrors aead_decrypt()'s choice */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
1037
/*
 * Completion callback for AEAD decryption when the SEC verified the ICV
 * itself: translate the descriptor's integrity-check status bits into
 * -EBADMSG on mismatch.
 */
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
1058
9c4a7965
KP
1059/*
1060 * convert scatterlist to SEC h/w link table format
1061 * stop at cryptlen bytes
1062 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	/* one link-table entry per s/g segment */
	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		/* goes negative once the list covers more than cryptlen */
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	/* drop whole trailing entries that lie entirely past cryptlen */
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	/* cryptlen is <= 0 here, so this shortens the last kept entry */
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
1093
1094/*
1095 * fill in and submit ipsec_esp descriptor
1096 */
/*
 * fill in and submit ipsec_esp descriptor
 *
 * Populates the seven SEC descriptor pointers (hmac key, hmac/assoc data,
 * cipher iv, cipher key, cipher in, cipher out, iv out), building s/g link
 * tables in edesc->link_tbl when src/dst/assoc are not contiguous, then
 * submits to the channel.  On any result other than -EINPROGRESS the
 * descriptor is unmapped and the edesc freed here; otherwise the callback
 * owns cleanup.  NOTE(review): the 'seq' argument is unused in this body.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		/* assoc is scattered: build a link table after src/dst's */
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		/* contiguous: assoc+IV are adjacent, point at whichever
		 * exists (IV directly when there is no assoc data) */
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc), 0);
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
	} else {
		sg_link_tbl_len = cryptlen;

		/* h/w ICV check reads the appended ICV too */
		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src), 0);
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
	} else {
		/* dst link table sits after src's (src_nents + 1 entries) */
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1246
9c4a7965
KP
1247/*
1248 * derive number of elements in scatterlist
1249 */
2a1cfe46 1250static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
9c4a7965
KP
1251{
1252 struct scatterlist *sg = sg_list;
1253 int sg_nents = 0;
1254
2a1cfe46 1255 *chained = false;
4de9d0b5 1256 while (nbytes > 0) {
9c4a7965
KP
1257 sg_nents++;
1258 nbytes -= sg->length;
4de9d0b5 1259 if (!sg_is_last(sg) && (sg + 1)->length == 0)
2a1cfe46 1260 *chained = true;
5be4d4c9 1261 sg = sg_next(sg);
9c4a7965
KP
1262 }
1263
1264 return sg_nents;
1265}
1266
1267/*
56af8cd4 1268 * allocate and map the extended descriptor
9c4a7965 1269 */
4de9d0b5 1270static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
79fd31d3 1271 struct scatterlist *assoc,
4de9d0b5
LN
1272 struct scatterlist *src,
1273 struct scatterlist *dst,
79fd31d3
HG
1274 u8 *iv,
1275 unsigned int assoclen,
4de9d0b5
LN
1276 unsigned int cryptlen,
1277 unsigned int authsize,
79fd31d3 1278 unsigned int ivsize,
4de9d0b5 1279 int icv_stashing,
62293a37
HG
1280 u32 cryptoflags,
1281 bool encrypt)
9c4a7965 1282{
56af8cd4 1283 struct talitos_edesc *edesc;
79fd31d3
HG
1284 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
1285 bool assoc_chained = false, src_chained = false, dst_chained = false;
1286 dma_addr_t iv_dma = 0;
4de9d0b5 1287 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
586725f8 1288 GFP_ATOMIC;
6f65f6ac
LC
1289 struct talitos_private *priv = dev_get_drvdata(dev);
1290 bool is_sec1 = has_ftr_sec1(priv);
1291 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
9c4a7965 1292
6f65f6ac 1293 if (cryptlen + authsize > max_len) {
4de9d0b5 1294 dev_err(dev, "length exceeds h/w max limit\n");
9c4a7965
KP
1295 return ERR_PTR(-EINVAL);
1296 }
1297
935e99a3 1298 if (ivsize)
79fd31d3
HG
1299 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1300
935e99a3 1301 if (assoclen) {
79fd31d3
HG
1302 /*
1303 * Currently it is assumed that iv is provided whenever assoc
1304 * is.
1305 */
1306 BUG_ON(!iv);
1307
1308 assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
1309 talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
1310 assoc_chained);
1311 assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
1312
1313 if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
1314 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1315 }
1316
62293a37
HG
1317 if (!dst || dst == src) {
1318 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1319 src_nents = (src_nents == 1) ? 0 : src_nents;
1320 dst_nents = dst ? src_nents : 0;
1321 } else { /* dst && dst != src*/
1322 src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1323 &src_chained);
1324 src_nents = (src_nents == 1) ? 0 : src_nents;
1325 dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1326 &dst_chained);
1327 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
9c4a7965
KP
1328 }
1329
1330 /*
1331 * allocate space for base edesc plus the link tables,
f3c85bc1 1332 * allowing for two separate entries for ICV and generated ICV (+ 2),
9c4a7965
KP
1333 * and the ICV data itself
1334 */
56af8cd4 1335 alloc_len = sizeof(struct talitos_edesc);
79fd31d3 1336 if (assoc_nents || src_nents || dst_nents) {
6f65f6ac
LC
1337 if (is_sec1)
1338 dma_len = src_nents ? cryptlen : 0 +
1339 dst_nents ? cryptlen : 0;
1340 else
1341 dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
1342 sizeof(struct talitos_ptr) + authsize;
9c4a7965
KP
1343 alloc_len += dma_len;
1344 } else {
1345 dma_len = 0;
4de9d0b5 1346 alloc_len += icv_stashing ? authsize : 0;
9c4a7965
KP
1347 }
1348
586725f8 1349 edesc = kmalloc(alloc_len, GFP_DMA | flags);
9c4a7965 1350 if (!edesc) {
935e99a3
HG
1351 if (assoc_chained)
1352 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1353 else if (assoclen)
1354 dma_unmap_sg(dev, assoc,
1355 assoc_nents ? assoc_nents - 1 : 1,
1356 DMA_TO_DEVICE);
1357
79fd31d3
HG
1358 if (iv_dma)
1359 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
935e99a3 1360
4de9d0b5 1361 dev_err(dev, "could not allocate edescriptor\n");
9c4a7965
KP
1362 return ERR_PTR(-ENOMEM);
1363 }
1364
79fd31d3 1365 edesc->assoc_nents = assoc_nents;
9c4a7965
KP
1366 edesc->src_nents = src_nents;
1367 edesc->dst_nents = dst_nents;
79fd31d3 1368 edesc->assoc_chained = assoc_chained;
2a1cfe46
HG
1369 edesc->src_chained = src_chained;
1370 edesc->dst_chained = dst_chained;
79fd31d3 1371 edesc->iv_dma = iv_dma;
9c4a7965 1372 edesc->dma_len = dma_len;
497f2e6b
LN
1373 if (dma_len)
1374 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1375 edesc->dma_len,
1376 DMA_BIDIRECTIONAL);
9c4a7965
KP
1377
1378 return edesc;
1379}
1380
/* AEAD wrapper around talitos_edesc_alloc(): pulls assoc/src/dst and
 * lengths from the aead_request and the tfm's authsize/ivsize. */
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}
1393
56af8cd4 1394static int aead_encrypt(struct aead_request *req)
9c4a7965
KP
1395{
1396 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1397 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1398 struct talitos_edesc *edesc;
9c4a7965
KP
1399
1400 /* allocate extended descriptor */
62293a37 1401 edesc = aead_edesc_alloc(req, req->iv, 0, true);
9c4a7965
KP
1402 if (IS_ERR(edesc))
1403 return PTR_ERR(edesc);
1404
1405 /* set encrypt */
70bcaca7 1406 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965 1407
79fd31d3 1408 return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
9c4a7965
KP
1409}
1410
/*
 * AEAD decrypt entry point.  Chooses h/w ICV checking when the SEC
 * supports it (and the request layout allows), otherwise stashes the
 * incoming ICV so the completion callback can verify it in software.
 * NOTE(review): req->cryptlen is decremented in place; on edesc
 * allocation failure the request is returned with cryptlen modified.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* payload length excludes the trailing ICV */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}
1461
/*
 * AEAD givencrypt: like aead_encrypt() but the driver generates the IV
 * from the ctx IV, perturbed by the sequence number so consecutive
 * packets don't share an IV.
 */
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}
1483
4de9d0b5
LN
1484static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1485 const u8 *key, unsigned int keylen)
1486{
1487 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
4de9d0b5
LN
1488
1489 memcpy(&ctx->key, key, keylen);
1490 ctx->keylen = keylen;
1491
1492 return 0;
4de9d0b5
LN
1493}
1494
032d197e
LC
/*
 * Unmap (or, for SEC1, copy back from) the data in/out buffers of a
 * nonsnoop request.  SEC1 multi-segment dst was bounced through
 * edesc->buf, so its contents are synced and copied back here;
 * SEC2+ uses regular s/g unmapping via talitos_sg_unmap().
 */
static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			/* single-segment src was dma_map_sg'd directly */
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			/* output went to the bounce buffer at offset len */
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}
1521
4de9d0b5
LN
/* Undo all DMA mappings of an ablkcipher descriptor: iv out, data
 * in/out, cipher key, cipher iv, and the link-table/bounce area. */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1536
/* Completion callback for ablkcipher requests: unmap, free the edesc,
 * and complete the request with the h/w status. */
static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
1552
032d197e
LC
/*
 * Point a descriptor ptr at the input data.  SEC1 cannot scatter/gather,
 * so multi-segment input is copied into the edesc bounce buffer; SEC2+
 * maps the scatterlist and builds a link table when needed.
 * Returns the mapped segment count.
 * NOTE(review): not static although only used in this file — candidate
 * for internal linkage; confirm no external declaration exists.
 */
int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			/* bounce: linearize input into edesc->buf */
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
					  edesc->src_chained);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed*/
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}
1602
/*
 * Point a descriptor ptr at the output data.  SEC1 multi-segment dst is
 * routed through the second half of the bounce buffer (offset len);
 * SEC2+ builds a dst link table after the src table when scattered.
 * 'dir == DMA_NONE' means dst shares src's mapping (in-place operation).
 * NOTE(review): not static although only used in this file.
 */
void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
					  dir, edesc->dst_chained);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			/* output lands in the bounce buffer at offset len */
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			/* dst table starts after src's src_nents+1 entries */
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
				       (edesc->src_nents + 1) *
				       sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_count = sg_to_link_tbl(dst, sg_count, len,
						  link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}
1649
4de9d0b5
LN
/*
 * Fill in and submit an ablkcipher descriptor: iv, key, data in, data
 * out, iv out.  On any submit result other than -EINPROGRESS the
 * descriptor is unmapped and the edesc freed here.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out */
	/* DMA_NONE: in-place, dst reuses src's mapping */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1706
/* ablkcipher wrapper around talitos_edesc_alloc(): no assoc data, no
 * authsize, IV taken from areq->info. */
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}
1718
1719static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1720{
1721 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1722 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1723 struct talitos_edesc *edesc;
1724
1725 /* allocate extended descriptor */
62293a37 1726 edesc = ablkcipher_edesc_alloc(areq, true);
4de9d0b5
LN
1727 if (IS_ERR(edesc))
1728 return PTR_ERR(edesc);
1729
1730 /* set encrypt */
1731 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1732
febec542 1733 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1734}
1735
1736static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1737{
1738 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1739 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1740 struct talitos_edesc *edesc;
1741
1742 /* allocate extended descriptor */
62293a37 1743 edesc = ablkcipher_edesc_alloc(areq, false);
4de9d0b5
LN
1744 if (IS_ERR(edesc))
1745 return PTR_ERR(edesc);
1746
1747 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1748
febec542 1749 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1750}
1751
497f2e6b
LN
/*
 * Undo all DMA mappings of an ahash descriptor: digest/context out,
 * data in, and the optionally mapped hash-context-in and HMAC-key
 * pointers (present only when their length field is non-zero).
 */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}
1778
/*
 * Completion callback for ahash requests: carry any partial trailing
 * block over to the request buffer for the next update/final/finup,
 * then unmap, free, and complete.
 */
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
1799
2d02905e
LC
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 *
 * The 64-byte buffer is a single pre-padded empty message block
 * (leading 0x80 bit, zero length); h/w padding is disabled so the
 * engine hashes it as-is.
 * NOTE(review): not static although only used in this file.
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
1820
497f2e6b
LN
/*
 * Fill in and submit an ahash descriptor: optional hash-context in,
 * optional HMAC key, data in, and digest or intermediate context out.
 * On SEC1, a zero-length data-in pointer triggers the manual-padding
 * workaround (talitos_handle_buggy_hash).  On any submit result other
 * than -EINPROGRESS the descriptor is unmapped and the edesc freed here.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* SEC1 cannot hash a zero-length message; substitute a padded block */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1891
1892static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1893 unsigned int nbytes)
1894{
1895 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1896 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1897 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1898
79fd31d3 1899 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
62293a37 1900 nbytes, 0, 0, 0, areq->base.flags, false);
497f2e6b
LN
1901}
1902
1903static int ahash_init(struct ahash_request *areq)
1904{
1905 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1906 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1907
1908 /* Initialize the context */
5e833bc4 1909 req_ctx->nbuf = 0;
60f208d7
KP
1910 req_ctx->first = 1; /* first indicates h/w must init its context */
1911 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1912 req_ctx->hw_context_size =
1913 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1914 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1915 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1916
1917 return 0;
1918}
1919
60f208d7
KP
1920/*
1921 * on h/w without explicit sha224 support, we initialize h/w context
1922 * manually with sha224 constants, and tell it to run sha256.
1923 */
1924static int ahash_init_sha224_swinit(struct ahash_request *areq)
1925{
1926 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1927
1928 ahash_init(areq);
1929 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1930
a752447a
KP
1931 req_ctx->hw_context[0] = SHA224_H0;
1932 req_ctx->hw_context[1] = SHA224_H1;
1933 req_ctx->hw_context[2] = SHA224_H2;
1934 req_ctx->hw_context[3] = SHA224_H3;
1935 req_ctx->hw_context[4] = SHA224_H4;
1936 req_ctx->hw_context[5] = SHA224_H5;
1937 req_ctx->hw_context[6] = SHA224_H6;
1938 req_ctx->hw_context[7] = SHA224_H7;
60f208d7
KP
1939
1940 /* init 64-bit count */
1941 req_ctx->hw_context[8] = 0;
1942 req_ctx->hw_context[9] = 0;
1943
1944 return 0;
1945}
1946
497f2e6b
LN
/*
 * Core update/final/finup handler: buffer sub-block amounts, hash whole
 * blocks through the SEC, and stash any trailing partial block so the
 * next call (or final) can pick it up.  @nbytes is the new data in
 * areq->src; req_ctx->last distinguishes final/finup from update.
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	/* assumes blocksize is a power of two — mask, not modulo */
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		/* save the tail of src for the next update/final */
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
2030
2031static int ahash_update(struct ahash_request *areq)
2032{
2033 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2034
2035 req_ctx->last = 0;
2036
2037 return ahash_process_req(areq, areq->nbytes);
2038}
2039
2040static int ahash_final(struct ahash_request *areq)
2041{
2042 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2043
2044 req_ctx->last = 1;
2045
2046 return ahash_process_req(areq, 0);
2047}
2048
2049static int ahash_finup(struct ahash_request *areq)
2050{
2051 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2052
2053 req_ctx->last = 1;
2054
2055 return ahash_process_req(areq, areq->nbytes);
2056}
2057
2058static int ahash_digest(struct ahash_request *areq)
2059{
2060 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 2061 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 2062
60f208d7 2063 ahash->init(areq);
497f2e6b
LN
2064 req_ctx->last = 1;
2065
2066 return ahash_process_req(areq, areq->nbytes);
2067}
2068
79b3a418
LN
/* Completion bookkeeping for the synchronous keyhash() helper. */
struct keyhash_result {
	struct completion completion;	/* signalled by keyhash_complete() */
	int err;			/* final status of the async request */
};
2073
2074static void keyhash_complete(struct crypto_async_request *req, int err)
2075{
2076 struct keyhash_result *res = req->data;
2077
2078 if (err == -EINPROGRESS)
2079 return;
2080
2081 res->err = err;
2082 complete(&res->completion);
2083}
2084
/*
 * Synchronously digest an over-long HMAC key into @hash using this same
 * tfm.  ctx->keylen is zeroed first so the operation runs as a plain
 * hash, not an HMAC.  Returns 0 on success or a negative errno.
 */
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		/*
		 * NOTE(review): an interruptible wait can return
		 * -ERESTARTSYS while the hardware still owns the request's
		 * buffers — confirm whether wait_for_completion() (non-
		 * interruptible) is the intended semantics here.
		 */
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
2127
2128static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2129 unsigned int keylen)
2130{
2131 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2132 unsigned int blocksize =
2133 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2134 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2135 unsigned int keysize = keylen;
2136 u8 hash[SHA512_DIGEST_SIZE];
2137 int ret;
2138
2139 if (keylen <= blocksize)
2140 memcpy(ctx->key, key, keysize);
2141 else {
2142 /* Must get the hash of the long key */
2143 ret = keyhash(tfm, key, keylen, hash);
2144
2145 if (ret) {
2146 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2147 return -EINVAL;
2148 }
2149
2150 keysize = digestsize;
2151 memcpy(ctx->key, hash, digestsize);
2152 }
2153
2154 ctx->keylen = keysize;
2155
2156 return 0;
2157}
2158
2159
/* One entry of driver_algs[]: which crypto API type it is, the alg
 * definition for that type, and the SEC descriptor header to use. */
struct talitos_alg_template {
	u32 type;			/* CRYPTO_ALG_TYPE_* selecting the union member */
	union {
		struct crypto_alg crypto;	/* AEAD / ablkcipher algs */
		struct ahash_alg hash;		/* AHASH algs */
	} alg;
	__be32 desc_hdr_template;	/* descriptor header for this alg */
};
2168
/*
 * Table of every algorithm this driver can register.  Entries are
 * filtered at probe time by hw_supports() against the device-tree
 * capability mask, and by feature flags in talitos_alg_alloc().
 */
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha224),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	/* SHA-384/512 need the MDEU-B execution unit */
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(md5),cbc(aes))",
		.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "md5",
			.cra_driver_name = "md5-talitos",
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_MD5,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "sha1-talitos",
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA1,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha224",
			.cra_driver_name = "sha224-talitos",
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA224,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "sha256-talitos",
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA256,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha384",
			.cra_driver_name = "sha384-talitos",
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha512",
			.cra_driver_name = "sha512-talitos",
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "hmac-md5-talitos",
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_MD5,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "hmac-sha1-talitos",
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA1,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha224)",
			.cra_driver_name = "hmac-sha224-talitos",
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA224,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "hmac-sha256-talitos",
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA256,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha384)",
			.cra_driver_name = "hmac-sha384-talitos",
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha512)",
			.cra_driver_name = "hmac-sha512-talitos",
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA512,
	}
};
2623
/* A registered algorithm instance: list linkage, owning SEC device,
 * and a private copy of its template. */
struct talitos_crypto_alg {
	struct list_head entry;		/* link in talitos_private alg_list */
	struct device *dev;		/* SEC device servicing this alg */
	struct talitos_alg_template algt;
};
2629
/*
 * Common tfm init: recover the owning talitos_crypto_alg from the
 * crypto_alg, record the device, pick a channel and seed the
 * descriptor header template in the tfm context.
 */
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	/* ahash algs embed crypto_alg one level deeper than the others */
	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	/* masking assumes num_channels is a power of two — TODO confirm */
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
2661
2662static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2663{
2664 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2665
2666 talitos_cra_init(tfm);
9c4a7965
KP
2667
2668 /* random first IV */
70bcaca7 2669 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
9c4a7965
KP
2670
2671 return 0;
2672}
2673
497f2e6b
LN
2674static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2675{
2676 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2677
2678 talitos_cra_init(tfm);
2679
2680 ctx->keylen = 0;
2681 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2682 sizeof(struct talitos_ahash_req_ctx));
2683
2684 return 0;
2685}
2686
9c4a7965
KP
2687/*
2688 * given the alg's descriptor header template, determine whether descriptor
2689 * type and primary/secondary execution units required match the hw
2690 * capabilities description provided in the device tree node.
2691 */
2692static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2693{
2694 struct talitos_private *priv = dev_get_drvdata(dev);
2695 int ret;
2696
2697 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2698 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2699
2700 if (SECONDARY_EU(desc_hdr_template))
2701 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2702 & priv->exec_units);
2703
2704 return ret;
2705}
2706
/*
 * Platform-device remove: unregister every algorithm, the RNG if
 * present, then tear down channels, IRQs, tasklets and the register
 * mapping in the reverse order of probe.
 */
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	/* release both IRQ lines; the second may never have been mapped */
	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	/* second tasklet only exists when a secondary IRQ was requested */
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}
2752
2753static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2754 struct talitos_alg_template
2755 *template)
2756{
60f208d7 2757 struct talitos_private *priv = dev_get_drvdata(dev);
9c4a7965
KP
2758 struct talitos_crypto_alg *t_alg;
2759 struct crypto_alg *alg;
2760
2761 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2762 if (!t_alg)
2763 return ERR_PTR(-ENOMEM);
2764
acbf7c62
LN
2765 t_alg->algt = *template;
2766
2767 switch (t_alg->algt.type) {
2768 case CRYPTO_ALG_TYPE_ABLKCIPHER:
497f2e6b
LN
2769 alg = &t_alg->algt.alg.crypto;
2770 alg->cra_init = talitos_cra_init;
d4cd3283 2771 alg->cra_type = &crypto_ablkcipher_type;
b286e003
KP
2772 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2773 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2774 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2775 alg->cra_ablkcipher.geniv = "eseqiv";
497f2e6b 2776 break;
acbf7c62
LN
2777 case CRYPTO_ALG_TYPE_AEAD:
2778 alg = &t_alg->algt.alg.crypto;
497f2e6b 2779 alg->cra_init = talitos_cra_init_aead;
d4cd3283 2780 alg->cra_type = &crypto_aead_type;
b286e003
KP
2781 alg->cra_aead.setkey = aead_setkey;
2782 alg->cra_aead.setauthsize = aead_setauthsize;
2783 alg->cra_aead.encrypt = aead_encrypt;
2784 alg->cra_aead.decrypt = aead_decrypt;
2785 alg->cra_aead.givencrypt = aead_givencrypt;
2786 alg->cra_aead.geniv = "<built-in>";
acbf7c62
LN
2787 break;
2788 case CRYPTO_ALG_TYPE_AHASH:
2789 alg = &t_alg->algt.alg.hash.halg.base;
497f2e6b 2790 alg->cra_init = talitos_cra_init_ahash;
d4cd3283 2791 alg->cra_type = &crypto_ahash_type;
b286e003
KP
2792 t_alg->algt.alg.hash.init = ahash_init;
2793 t_alg->algt.alg.hash.update = ahash_update;
2794 t_alg->algt.alg.hash.final = ahash_final;
2795 t_alg->algt.alg.hash.finup = ahash_finup;
2796 t_alg->algt.alg.hash.digest = ahash_digest;
2797 t_alg->algt.alg.hash.setkey = ahash_setkey;
2798
79b3a418 2799 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
0b2730d8
KP
2800 !strncmp(alg->cra_name, "hmac", 4)) {
2801 kfree(t_alg);
79b3a418 2802 return ERR_PTR(-ENOTSUPP);
0b2730d8 2803 }
60f208d7 2804 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
79b3a418
LN
2805 (!strcmp(alg->cra_name, "sha224") ||
2806 !strcmp(alg->cra_name, "hmac(sha224)"))) {
60f208d7
KP
2807 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2808 t_alg->algt.desc_hdr_template =
2809 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2810 DESC_HDR_SEL0_MDEUA |
2811 DESC_HDR_MODE0_MDEU_SHA256;
2812 }
497f2e6b 2813 break;
1d11911a
KP
2814 default:
2815 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2816 return ERR_PTR(-EINVAL);
acbf7c62 2817 }
9c4a7965 2818
9c4a7965 2819 alg->cra_module = THIS_MODULE;
9c4a7965 2820 alg->cra_priority = TALITOS_CRA_PRIORITY;
9c4a7965 2821 alg->cra_alignmask = 0;
9c4a7965 2822 alg->cra_ctxsize = sizeof(struct talitos_ctx);
d912bb76 2823 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
9c4a7965 2824
9c4a7965
KP
2825 t_alg->dev = dev;
2826
2827 return t_alg;
2828}
2829
c3e337f8
KP
2830static int talitos_probe_irq(struct platform_device *ofdev)
2831{
2832 struct device *dev = &ofdev->dev;
2833 struct device_node *np = ofdev->dev.of_node;
2834 struct talitos_private *priv = dev_get_drvdata(dev);
2835 int err;
dd3c0987 2836 bool is_sec1 = has_ftr_sec1(priv);
c3e337f8
KP
2837
2838 priv->irq[0] = irq_of_parse_and_map(np, 0);
2cdba3cf 2839 if (!priv->irq[0]) {
c3e337f8
KP
2840 dev_err(dev, "failed to map irq\n");
2841 return -EINVAL;
2842 }
dd3c0987
LC
2843 if (is_sec1) {
2844 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2845 dev_driver_string(dev), dev);
2846 goto primary_out;
2847 }
c3e337f8
KP
2848
2849 priv->irq[1] = irq_of_parse_and_map(np, 1);
2850
2851 /* get the primary irq line */
2cdba3cf 2852 if (!priv->irq[1]) {
dd3c0987 2853 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
c3e337f8
KP
2854 dev_driver_string(dev), dev);
2855 goto primary_out;
2856 }
2857
dd3c0987 2858 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
c3e337f8
KP
2859 dev_driver_string(dev), dev);
2860 if (err)
2861 goto primary_out;
2862
2863 /* get the secondary irq line */
dd3c0987 2864 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
c3e337f8
KP
2865 dev_driver_string(dev), dev);
2866 if (err) {
2867 dev_err(dev, "failed to request secondary irq\n");
2868 irq_dispose_mapping(priv->irq[1]);
2cdba3cf 2869 priv->irq[1] = 0;
c3e337f8
KP
2870 }
2871
2872 return err;
2873
2874primary_out:
2875 if (err) {
2876 dev_err(dev, "failed to request primary irq\n");
2877 irq_dispose_mapping(priv->irq[0]);
2cdba3cf 2878 priv->irq[0] = 0;
c3e337f8
KP
2879 }
2880
2881 return err;
2882}
2883
1c48a5c9 2884static int talitos_probe(struct platform_device *ofdev)
9c4a7965
KP
2885{
2886 struct device *dev = &ofdev->dev;
61c7a080 2887 struct device_node *np = ofdev->dev.of_node;
9c4a7965
KP
2888 struct talitos_private *priv;
2889 const unsigned int *prop;
2890 int i, err;
5fa7fa14 2891 int stride;
9c4a7965
KP
2892
2893 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2894 if (!priv)
2895 return -ENOMEM;
2896
f3de9cb1
KH
2897 INIT_LIST_HEAD(&priv->alg_list);
2898
9c4a7965
KP
2899 dev_set_drvdata(dev, priv);
2900
2901 priv->ofdev = ofdev;
2902
511d63cb
HG
2903 spin_lock_init(&priv->reg_lock);
2904
9c4a7965
KP
2905 priv->reg = of_iomap(np, 0);
2906 if (!priv->reg) {
2907 dev_err(dev, "failed to of_iomap\n");
2908 err = -ENOMEM;
2909 goto err_out;
2910 }
2911
2912 /* get SEC version capabilities from device tree */
2913 prop = of_get_property(np, "fsl,num-channels", NULL);
2914 if (prop)
2915 priv->num_channels = *prop;
2916
2917 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2918 if (prop)
2919 priv->chfifo_len = *prop;
2920
2921 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2922 if (prop)
2923 priv->exec_units = *prop;
2924
2925 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2926 if (prop)
2927 priv->desc_types = *prop;
2928
2929 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2930 !priv->exec_units || !priv->desc_types) {
2931 dev_err(dev, "invalid property data in device tree node\n");
2932 err = -EINVAL;
2933 goto err_out;
2934 }
2935
f3c85bc1
LN
2936 if (of_device_is_compatible(np, "fsl,sec3.0"))
2937 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2938
fe5720e2 2939 if (of_device_is_compatible(np, "fsl,sec2.1"))
60f208d7 2940 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
79b3a418
LN
2941 TALITOS_FTR_SHA224_HWINIT |
2942 TALITOS_FTR_HMAC_OK;
fe5720e2 2943
21590888
LC
2944 if (of_device_is_compatible(np, "fsl,sec1.0"))
2945 priv->features |= TALITOS_FTR_SEC1;
2946
5fa7fa14
LC
2947 if (of_device_is_compatible(np, "fsl,sec1.2")) {
2948 priv->reg_deu = priv->reg + TALITOS12_DEU;
2949 priv->reg_aesu = priv->reg + TALITOS12_AESU;
2950 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2951 stride = TALITOS1_CH_STRIDE;
2952 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2953 priv->reg_deu = priv->reg + TALITOS10_DEU;
2954 priv->reg_aesu = priv->reg + TALITOS10_AESU;
2955 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2956 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2957 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2958 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2959 stride = TALITOS1_CH_STRIDE;
2960 } else {
2961 priv->reg_deu = priv->reg + TALITOS2_DEU;
2962 priv->reg_aesu = priv->reg + TALITOS2_AESU;
2963 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2964 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2965 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2966 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2967 priv->reg_keu = priv->reg + TALITOS2_KEU;
2968 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2969 stride = TALITOS2_CH_STRIDE;
2970 }
2971
dd3c0987
LC
2972 err = talitos_probe_irq(ofdev);
2973 if (err)
2974 goto err_out;
2975
2976 if (of_device_is_compatible(np, "fsl,sec1.0")) {
2977 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2978 (unsigned long)dev);
2979 } else {
2980 if (!priv->irq[1]) {
2981 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
2982 (unsigned long)dev);
2983 } else {
2984 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
2985 (unsigned long)dev);
2986 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
2987 (unsigned long)dev);
2988 }
2989 }
2990
4b992628
KP
2991 priv->chan = kzalloc(sizeof(struct talitos_channel) *
2992 priv->num_channels, GFP_KERNEL);
2993 if (!priv->chan) {
2994 dev_err(dev, "failed to allocate channel management space\n");
9c4a7965
KP
2995 err = -ENOMEM;
2996 goto err_out;
2997 }
2998
f641dddd
MH
2999 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3000
c3e337f8 3001 for (i = 0; i < priv->num_channels; i++) {
5fa7fa14 3002 priv->chan[i].reg = priv->reg + stride * (i + 1);
2cdba3cf 3003 if (!priv->irq[1] || !(i & 1))
c3e337f8 3004 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
ad42d5fc 3005
4b992628
KP
3006 spin_lock_init(&priv->chan[i].head_lock);
3007 spin_lock_init(&priv->chan[i].tail_lock);
9c4a7965 3008
4b992628
KP
3009 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
3010 priv->fifo_len, GFP_KERNEL);
3011 if (!priv->chan[i].fifo) {
9c4a7965
KP
3012 dev_err(dev, "failed to allocate request fifo %d\n", i);
3013 err = -ENOMEM;
3014 goto err_out;
3015 }
9c4a7965 3016
4b992628
KP
3017 atomic_set(&priv->chan[i].submit_count,
3018 -(priv->chfifo_len - 1));
f641dddd 3019 }
9c4a7965 3020
81eb024c
KP
3021 dma_set_mask(dev, DMA_BIT_MASK(36));
3022
9c4a7965
KP
3023 /* reset and initialize the h/w */
3024 err = init_device(dev);
3025 if (err) {
3026 dev_err(dev, "failed to initialize device\n");
3027 goto err_out;
3028 }
3029
3030 /* register the RNG, if available */
3031 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3032 err = talitos_register_rng(dev);
3033 if (err) {
3034 dev_err(dev, "failed to register hwrng: %d\n", err);
3035 goto err_out;
3036 } else
3037 dev_info(dev, "hwrng\n");
3038 }
3039
3040 /* register crypto algorithms the device supports */
9c4a7965
KP
3041 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3042 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3043 struct talitos_crypto_alg *t_alg;
acbf7c62 3044 char *name = NULL;
9c4a7965
KP
3045
3046 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3047 if (IS_ERR(t_alg)) {
3048 err = PTR_ERR(t_alg);
0b2730d8 3049 if (err == -ENOTSUPP)
79b3a418 3050 continue;
9c4a7965
KP
3051 goto err_out;
3052 }
3053
acbf7c62
LN
3054 switch (t_alg->algt.type) {
3055 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3056 case CRYPTO_ALG_TYPE_AEAD:
3057 err = crypto_register_alg(
3058 &t_alg->algt.alg.crypto);
3059 name = t_alg->algt.alg.crypto.cra_driver_name;
3060 break;
3061 case CRYPTO_ALG_TYPE_AHASH:
3062 err = crypto_register_ahash(
3063 &t_alg->algt.alg.hash);
3064 name =
3065 t_alg->algt.alg.hash.halg.base.cra_driver_name;
3066 break;
3067 }
9c4a7965
KP
3068 if (err) {
3069 dev_err(dev, "%s alg registration failed\n",
acbf7c62 3070 name);
9c4a7965 3071 kfree(t_alg);
991155ba 3072 } else
9c4a7965 3073 list_add_tail(&t_alg->entry, &priv->alg_list);
9c4a7965
KP
3074 }
3075 }
5b859b6e
KP
3076 if (!list_empty(&priv->alg_list))
3077 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3078 (char *)of_get_property(np, "compatible", NULL));
9c4a7965
KP
3079
3080 return 0;
3081
3082err_out:
3083 talitos_remove(ofdev);
9c4a7965
KP
3084
3085 return err;
3086}
3087
6c3f975a 3088static const struct of_device_id talitos_match[] = {
9c4a7965
KP
3089 {
3090 .compatible = "fsl,sec2.0",
3091 },
3092 {},
3093};
3094MODULE_DEVICE_TABLE(of, talitos_match);
3095
1c48a5c9 3096static struct platform_driver talitos_driver = {
4018294b
GL
3097 .driver = {
3098 .name = "talitos",
4018294b
GL
3099 .of_match_table = talitos_match,
3100 },
9c4a7965 3101 .probe = talitos_probe,
596f1034 3102 .remove = talitos_remove,
9c4a7965
KP
3103};
3104
741e8c2d 3105module_platform_driver(talitos_driver);
9c4a7965
KP
3106
3107MODULE_LICENSE("GPL");
3108MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3109MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
This page took 0.50443 seconds and 5 git commands to generate.