/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
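
/*
 * Illustrative sketch (not part of the driver): a minimal synchronous
 * wrapper a caller might put around talitos_submit().  It assumes
 * <linux/completion.h> is available and that the caller has already built
 * a fully dma-mapped descriptor; a real callback must also inspect the
 * err argument and the descriptor header feedback, per the kernel-doc
 * above.  The names here are hypothetical.
 */
static void example_done(struct device *dev, struct talitos_desc *desc,
			 void *context, int error)
{
	/* error is 0 on success, or a negative errno from flush_channel() */
	complete((struct completion *)context);
}

static int __maybe_unused example_submit_sync(struct device *dev, int ch,
					      struct talitos_desc *desc)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = talitos_submit(dev, ch, desc, example_done, &done);
	if (ret != -EINPROGRESS)
		return ret;	/* -EAGAIN: channel fifo full, retry later */

	wait_for_completion(&done);
	return 0;
}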
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if (!is_sec1)
			hdr = request->desc->hdr;
		else if (request->desc->next_desc)
			hdr = (request->desc + 1)->hdr1;
		else
			hdr = request->desc->hdr1;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;

	return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 (i.e. ch0..ch3) */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
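
/*
 * Illustrative sketch (not part of this excerpt): the probe path is
 * expected to wire one of the handlers generated above into the interrupt
 * core, e.g. for a single-IRQ SEC2 configuration.  The helper name and
 * flags here are hypothetical.
 */
static int __maybe_unused example_request_irq(struct device *dev, int irq)
{
	return request_irq(irq, talitos2_interrupt_4ch, 0,
			   dev_driver_string(dev), dev);
}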
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}
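
/*
 * Illustrative sketch (not part of the driver): roughly how the hwrng core
 * consumes the hooks registered above -- poll data_present(), then pull one
 * 32-bit word with data_read().  The helper is hypothetical; real readers
 * go through the hwrng framework or /dev/hwrng.
 */
static int __maybe_unused example_rng_read(struct hwrng *rng, u32 *word)
{
	if (!rng->data_present(rng, 1))
		return -EAGAIN;

	return rng->data_read(rng, word);	/* returns bytes read */
}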
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptor type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
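
/*
 * Illustrative sketch (not part of the driver): aead_setkey() above parses
 * the generic authenc key blob defined by <crypto/authenc.h>.  A caller
 * would package an HMAC key and a cipher key roughly like this before
 * calling crypto_aead_setkey(); this hypothetical helper only shows the
 * rtattr layout that crypto_authenc_extractkeys() expects, and returns the
 * total blob length.
 */
static int __maybe_unused example_build_authenc_key(u8 *blob,
						    const u8 *authkey,
						    unsigned int authkeylen,
						    const u8 *enckey,
						    unsigned int enckeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (struct rtattr *)blob;

	/* leading rtattr carries the encryption key length */
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* authentication key first, then encryption key */
	memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen, enckey,
	       enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}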
static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(authenc);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(authenc, flags);
		goto out;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
					       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (is_ipsec_esp)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (is_ipsec_esp) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       authsize, is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
		}
	} else if (!is_ipsec_esp) {
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len) {
		void *addr = &edesc->link_tbl[0];

		if (is_sec1 && !dst)
			addr += sizeof(struct talitos_desc);
		edesc->dma_link_tbl = dma_map_single(dev, addr,
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);
	}
	return edesc;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
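
/*
 * Illustrative sketch (not part of the driver): once the talitos AEAD algs
 * are registered, the encrypt/decrypt paths above are reached through the
 * generic AEAD API.  The algorithm name and ICV size below are examples
 * only; the request-setup calls are elided.
 */
static int __maybe_unused example_aead_usage(void)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setauthsize(tfm, 16);
	/* ... crypto_aead_setkey() with an authenc key blob,
	 * aead_request_alloc(), aead_request_set_crypt(),
	 * crypto_aead_encrypt() ...
	 */

	crypto_free_aead(tfm);
	return err;
}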
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}
static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	u32 tmp[DES_EXPKEY_WORDS];

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	return ablkcipher_setkey(cipher, key, keylen);
}
static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
				  const u8 *key, unsigned int keylen)
{
	u32 flags;
	int err;

	flags = crypto_ablkcipher_get_flags(cipher);
	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}

	return ablkcipher_setkey(cipher, key, keylen);
}
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* cipher in */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
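
/*
 * Illustrative sketch (not part of the driver): these ablkcipher entry
 * points sit behind the kernel's generic symmetric-cipher API, so a user
 * typically reaches them through the skcipher interface, e.g. "cbc(aes)".
 * Request setup is elided.
 */
static int __maybe_unused example_skcipher_usage(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... crypto_skcipher_setkey(), skcipher_request_alloc(),
	 * skcipher_request_set_crypt(), crypto_skcipher_encrypt() ...
	 */
	crypto_free_skcipher(tfm);
	return 0;
}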
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = desc + 1;

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (edesc->desc.next_desc)
		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				unsigned int offset,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
				   edesc->buf + sizeof(struct talitos_desc),
				   length, req_ctx->nbuf);
	else
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, offset, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (is_sec1 && req_ctx->nbuf && length) {
		struct talitos_desc *desc2 = desc + 1;
		dma_addr_t next_desc;

		memset(desc2, 0, sizeof(*desc2));
		desc2->hdr = desc->hdr;
		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
		desc2->hdr1 = desc2->hdr;
		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

		if (desc->ptr[1].ptr)
			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
					 is_sec1);
		else
			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_TO_DEVICE);
		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc2->ptr[3], sg_count, offset, 0);
		if (sg_count > 1)
			sync_needed = true;
		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
		if (req_ctx->last)
			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_FROM_DEVICE);

		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
					   DMA_BIDIRECTIONAL);
		desc->next_desc = cpu_to_be32(next_desc);
	}

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1)
		nbytes -= req_ctx->nbuf;

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}
static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	unsigned int size;
	dma_addr_t dma;

	/* Initialize the context */
	req_ctx->buf_idx = 0;
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
	       ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
	       : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */

	return 0;
}
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;
	struct device *dev = ctx->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int offset = 0;
	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (!is_sec1 && req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
		if (nbytes_to_hash > blocksize)
			offset = blocksize - req_ctx->nbuf;
		else
			offset = nbytes_to_hash - req_ctx->nbuf;
		nents = sg_nents_for_len(areq->src, offset);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, offset);
		req_ctx->nbuf += offset;
		req_ctx->psrc = areq->src;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
				    ahash_done);
}
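
/*
 * Illustrative example of the buffering arithmetic above (a sketch,
 * assuming SHA-256 with a 64-byte block and 10 bytes already buffered):
 * an update of nbytes = 100 gives nbytes_to_hash = 110 and
 * to_hash_later = 110 & 63 = 46, so 64 bytes go to the h/w now and 46
 * are copied into the spare buffer for the next descriptor.  When the
 * total is an exact multiple of the block size and this is not the
 * final request, one full block is held back instead, so the closing
 * descriptor is never handed an empty payload.
 */
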
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}
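
/*
 * The four entry points above differ only in req_ctx->last and in how
 * many source bytes they pass down.  ahash_digest() additionally
 * re-runs the registered ->init() so that a sha224 swinit override
 * (wired up at algorithm registration time) is honoured for one-shot
 * digests as well.
 */
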
static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}
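
/*
 * The dma_map_single()/dma_unmap_single() pair above performs no
 * transfer by itself; mapping and immediately unmapping with
 * DMA_FROM_DEVICE is used as a cache-coherence idiom so the CPU sees
 * the hash context most recently written by the SEC before it is
 * copied into the exported state.
 */
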
static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	const struct talitos_export_state *export = in;
	unsigned int size;
	dma_addr_t dma;

	memset(req_ctx, 0, sizeof(*req_ctx));
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
	       ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
	       : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;
	memcpy(req_ctx->hw_context, export->hw_context, size);
	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}
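
/*
 * Context sizing: MD5, SHA-1 and SHA-224/256 share the smaller MDEU A
 * context, while SHA-384/SHA-512 need the larger MDEU B context; the
 * digest size is what tells them apart here.  The closing map/unmap
 * pair mirrors ahash_export(), this time with DMA_TO_DEVICE so the
 * restored context becomes visible to the SEC.
 */
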
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct crypto_wait wait;
	int ret;

	crypto_init_wait(&wait);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct device *dev = ctx->dev;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	ctx->keylen = keysize;
	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);

	return 0;
}
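
/*
 * This follows the usual HMAC rule (RFC 2104): a key longer than the
 * block size is first shortened to its digest.  E.g. for hmac(sha256),
 * blocksize = 64, so a 100-byte key is replaced by its 32-byte SHA-256
 * digest (computed via keyhash() above) before being DMA-mapped for
 * use in the descriptors.
 */
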
struct talitos_alg_template {
	u32 type;
	u32 priority;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};
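
/*
 * Each driver_algs[] template below carries both the crypto API facing
 * description (in the alg union) and the SEC descriptor header that
 * implements it.  At probe time hw_supports() compares the descriptor
 * type and execution-unit selects encoded in desc_hdr_template against
 * the capabilities advertised in the device tree, so unsupported
 * entries are simply never registered.
 */
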
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
				.setkey = ablkcipher_des_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
				.setkey = ablkcipher_des_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.setkey = ablkcipher_des3_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.setkey = ablkcipher_des3_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
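
/*
 * Round-robin channel assignment: the AND with (num_channels - 1) is a
 * cheap modulo, e.g. with 4 channels successive tfms get channels
 * 1, 2, 3, 0, 1, ...  This only works because talitos_probe() rejects
 * device trees whose fsl,num-channels is not a power of two.
 */
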
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
static void talitos_cra_exit(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
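
/*
 * Worked example: an authenc(hmac(sha1),cbc(aes)) template encodes
 * DESC_HDR_TYPE_IPSEC_ESP with AESU as primary and MDEU A as secondary
 * execution unit, so it is only considered supported when the device
 * tree advertises the IPSEC_ESP descriptor type and both of those EU
 * bits in fsl,exec-units-mask.
 */
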
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
			     GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
					     ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
					      aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		devm_kfree(dev, t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	return t_alg;
}
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
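
/*
 * IRQ topology: SEC1 exposes a single interrupt for everything, while
 * SEC2+ may provide either one line for all four channels or two lines
 * covering channels 0/2 and 1/3 respectively; the done tasklets set up
 * in talitos_probe() mirror whichever layout was found here.
 */
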
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	int i, err;
	int stride;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}
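
	/*
	 * These four fsl,* properties fully describe the engine: channel
	 * count and fifo depth size the software queues, while the
	 * execution-unit and descriptor-type masks are what hw_supports()
	 * later checks before registering each algorithm.
	 */
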
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}
	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}
	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						  priv->fifo_len,
						  sizeof(struct talitos_request),
						  GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}
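
	/*
	 * A sketch of the submit accounting (the increment itself lives in
	 * talitos_submit(), outside this excerpt): submit_count starts at
	 * -(chfifo_len - 1) so that it reaches zero exactly when the channel
	 * fifo is full; the submit path bumps it with atomic_inc_not_zero()
	 * and backs off with -EAGAIN once it can no longer do so.
	 */
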
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}
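
	/*
	 * DMA_BIT_MASK(36): SEC h/w pointers are 36 bits wide, with the
	 * upper bits carried in the descriptor's extended-pointer field,
	 * so the device can address memory past 4 GiB even on 32-bit
	 * platforms.
	 */
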
	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");