/*
 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "picoxcell_crypto_regs.h"

/*
 * The threshold for the number of entries in the CMD FIFO available before
 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
 * number of interrupts raised to the CPU.
 */
#define CMD0_IRQ_THRESHOLD   1

/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
 * flight is greater than the STAT_IRQ_THRESHOLD, or is 0, the timer is
 * disabled. When there are packets in flight but fewer than the threshold,
 * we enable the timer and, at expiry, attempt to remove any processed
 * packets from the queue; if there are still packets left, we schedule the
 * timer again.
 */
#define PACKET_TIMEOUT      1

/* The priority to register each algorithm with. */
#define SPACC_CRYPTO_ALG_PRIORITY       10000

#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN  16
#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ   64
#define SPACC_CRYPTO_IPSEC_MAX_CTXS     32
#define SPACC_CRYPTO_IPSEC_FIFO_SZ      32
#define SPACC_CRYPTO_L2_CIPHER_PG_SZ    64
#define SPACC_CRYPTO_L2_HASH_PG_SZ      64
#define SPACC_CRYPTO_L2_MAX_CTXS        128
#define SPACC_CRYPTO_L2_FIFO_SZ         128

#define MAX_DDT_LEN                     16

/* DDT format. This must match the hardware DDT format exactly. */
struct spacc_ddt {
        dma_addr_t      p;
        u32             len;
};
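
/*
 * Illustrative sketch only (not part of the driver): a payload split across
 * two buffers would be described to the engine by a three-entry DDT list,
 * terminated by a zeroed entry, e.g.:
 *
 *      ddt[0].p = buf0_phys;   ddt[0].len = len0;
 *      ddt[1].p = buf1_phys;   ddt[1].len = len1;
 *      ddt[2].p = 0;           ddt[2].len = 0;         <- list terminator
 *
 * spacc_sg_to_ddt() below builds exactly this layout from a scatterlist.
 */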

/*
 * Asynchronous crypto request structure.
 *
 * This structure defines a request that is either queued for processing or
 * being processed.
 */
struct spacc_req {
        struct list_head                list;
        struct spacc_engine             *engine;
        struct crypto_async_request     *req;
        int                             result;
        bool                            is_encrypt;
        unsigned                        ctx_id;
        dma_addr_t                      src_addr, dst_addr;
        struct spacc_ddt                *src_ddt, *dst_ddt;
        void                            (*complete)(struct spacc_req *req);

        /* AEAD specific bits. */
        u8                              *giv;
        size_t                          giv_len;
        dma_addr_t                      giv_pa;
};

struct spacc_engine {
        void __iomem                    *regs;
        struct list_head                pending;
        int                             next_ctx;
        spinlock_t                      hw_lock;
        int                             in_flight;
        struct list_head                completed;
        struct list_head                in_progress;
        struct tasklet_struct           complete;
        unsigned long                   fifo_sz;
        void __iomem                    *cipher_ctx_base;
        void __iomem                    *hash_key_base;
        struct spacc_alg                *algs;
        unsigned                        num_algs;
        struct list_head                registered_algs;
        size_t                          cipher_pg_sz;
        size_t                          hash_pg_sz;
        const char                      *name;
        struct clk                      *clk;
        struct device                   *dev;
        unsigned                        max_ctxs;
        struct timer_list               packet_timeout;
        unsigned                        stat_irq_thresh;
        struct dma_pool                 *req_pool;
};

/* Algorithm type mask. */
#define SPACC_CRYPTO_ALG_MASK           0x7

/* SPACC definition of a crypto algorithm. */
struct spacc_alg {
        unsigned long                   ctrl_default;
        unsigned long                   type;
        struct crypto_alg               alg;
        struct spacc_engine             *engine;
        struct list_head                entry;
        int                             key_offs;
        int                             iv_offs;
};

/* Generic context structure for any algorithm type. */
struct spacc_generic_ctx {
        struct spacc_engine             *engine;
        int                             flags;
        int                             key_offs;
        int                             iv_offs;
};

/* Block cipher context. */
struct spacc_ablk_ctx {
        struct spacc_generic_ctx        generic;
        u8                              key[AES_MAX_KEY_SIZE];
        u8                              key_len;
        /*
         * The fallback cipher. If the operation can't be done in hardware,
         * fall back to a software version.
         */
        struct crypto_ablkcipher        *sw_cipher;
};

/* AEAD cipher context. */
struct spacc_aead_ctx {
        struct spacc_generic_ctx        generic;
        u8                              cipher_key[AES_MAX_KEY_SIZE];
        u8                              hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
        u8                              cipher_key_len;
        u8                              hash_key_len;
        struct crypto_aead              *sw_cipher;
        size_t                          auth_size;
        u8                              salt[AES_BLOCK_SIZE];
};

static int spacc_ablk_submit(struct spacc_req *req);

static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
{
        return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}

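/*
 * Returns non-zero when the command FIFO is full, i.e. no further PDUs can
 * be submitted to the engine for now.
 */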
static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
        u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);

        return fifo_stat & SPA_FIFO_CMD_FULL;
}

/*
 * Given a cipher context, and a context number, get the base address of the
 * context page.
 *
 * Returns the address of the context page where the key/context may
 * be written.
 */
static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
                                                unsigned indx,
                                                bool is_cipher_ctx)
{
        return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
                        (indx * ctx->engine->cipher_pg_sz) :
                ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
}

/* The context pages can only be written with 32-bit accesses. */
static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
                                 unsigned count)
{
        const u32 *src32 = (const u32 *) src;

        while (count--)
                writel(*src32++, dst++);
}

static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
                                   void __iomem *page_addr, const u8 *key,
                                   size_t key_len, const u8 *iv, size_t iv_len)
{
        void __iomem *key_ptr = page_addr + ctx->key_offs;
        void __iomem *iv_ptr = page_addr + ctx->iv_offs;

        memcpy_toio32(key_ptr, key, key_len / 4);
        memcpy_toio32(iv_ptr, iv, iv_len / 4);
}

/*
 * Load a context into the engine's context memory.
 *
 * Returns the index of the context page where the context was loaded.
 */
static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
                               const u8 *ciph_key, size_t ciph_len,
                               const u8 *iv, size_t ivlen, const u8 *hash_key,
                               size_t hash_len)
{
        unsigned indx = ctx->engine->next_ctx++;
        void __iomem *ciph_page_addr, *hash_page_addr;

        ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
        hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);

        ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
        spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
                               ivlen);
        writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
               (1 << SPA_KEY_SZ_CIPHER_OFFSET),
               ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);

        if (hash_key) {
                memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
                writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
                       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
        }

        return indx;
}
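
/*
 * Example (mirroring spacc_ablk_submit later in this file): a cipher-only
 * context is loaded with no hash key, e.g.
 *
 *      req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key, ctx->key_len,
 *                                   ablk_req->info, alg->cra_ablkcipher.ivsize,
 *                                   NULL, 0);
 */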

/*
 * Count the number of scatterlist entries needed to describe nbytes of data.
 * The caller must ensure that nbytes does not exceed the total length of the
 * list, as the walk below does not check for running off its end.
 */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
        struct scatterlist *sg = sg_list;
        int sg_nents = 0;

        while (nbytes > 0) {
                ++sg_nents;
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return sg_nents;
}

static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
{
        ddt->p = phys;
        ddt->len = len;
}

/*
 * Take a crypto request and scatterlists for the data and turn them into DDTs
 * for passing to the crypto engines. This also DMA maps the data so that the
 * crypto engines can DMA to/from them.
 */
static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
                                         struct scatterlist *payload,
                                         unsigned nbytes,
                                         enum dma_data_direction dir,
                                         dma_addr_t *ddt_phys)
{
        unsigned nents, mapped_ents;
        struct scatterlist *cur;
        struct spacc_ddt *ddt;
        int i;

        nents = sg_count(payload, nbytes);
        mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);

        if (mapped_ents + 1 > MAX_DDT_LEN)
                goto out;

        ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
        if (!ddt)
                goto out;

        for_each_sg(payload, cur, mapped_ents, i)
                ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
        ddt_set(&ddt[mapped_ents], 0, 0);

        return ddt;

out:
        dma_unmap_sg(engine->dev, payload, nents, dir);
        return NULL;
}
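
/*
 * Typical use (as in spacc_ablk_setup later in this file): map a request's
 * source scatterlist for the engine and bail out on failure:
 *
 *      dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src, req->nbytes,
 *                                         DMA_TO_DEVICE, &dev_req->src_addr);
 *      if (!dev_req->src_ddt)
 *              goto out;
 */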
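/*
 * Build the source and destination DDT lists for an AEAD request. The engine
 * consumes the associated data first, then the (possibly generated) IV and
 * then the payload; both lists are zero-terminated.
 */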
static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
{
        struct aead_request *areq = container_of(req->req, struct aead_request,
                                                 base);
        struct spacc_engine *engine = req->engine;
        struct spacc_ddt *src_ddt, *dst_ddt;
        unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
        unsigned nents = sg_count(areq->src, areq->cryptlen);
        dma_addr_t iv_addr;
        struct scatterlist *cur;
        int i, dst_ents, src_ents, assoc_ents;
        u8 *iv = giv ? giv : areq->iv;

        src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
        if (!src_ddt)
                return -ENOMEM;

        dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
        if (!dst_ddt) {
                dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
                return -ENOMEM;
        }

        req->src_ddt = src_ddt;
        req->dst_ddt = dst_ddt;

        assoc_ents = dma_map_sg(engine->dev, areq->assoc,
                sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
        if (areq->src != areq->dst) {
                src_ents = dma_map_sg(engine->dev, areq->src, nents,
                                      DMA_TO_DEVICE);
                dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
                                      DMA_FROM_DEVICE);
        } else {
                src_ents = dma_map_sg(engine->dev, areq->src, nents,
                                      DMA_BIDIRECTIONAL);
                dst_ents = 0;
        }

        /*
         * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
         * formed by the crypto block and sent as the ESP IV for IPSEC.
         */
        iv_addr = dma_map_single(engine->dev, iv, ivsize,
                                 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        req->giv_pa = iv_addr;

        /*
         * Map the associated data. For decryption we don't copy the
         * associated data.
         */
        for_each_sg(areq->assoc, cur, assoc_ents, i) {
                ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
                if (req->is_encrypt)
                        ddt_set(dst_ddt++, sg_dma_address(cur),
                                sg_dma_len(cur));
        }
        ddt_set(src_ddt++, iv_addr, ivsize);

        if (giv || req->is_encrypt)
                ddt_set(dst_ddt++, iv_addr, ivsize);

        /*
         * Now map in the payload for the source and destination and terminate
         * with the NULL pointers.
         */
        for_each_sg(areq->src, cur, src_ents, i) {
                ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
                if (areq->src == areq->dst)
                        ddt_set(dst_ddt++, sg_dma_address(cur),
                                sg_dma_len(cur));
        }

        for_each_sg(areq->dst, cur, dst_ents, i)
                ddt_set(dst_ddt++, sg_dma_address(cur),
                        sg_dma_len(cur));

        ddt_set(src_ddt, 0, 0);
        ddt_set(dst_ddt, 0, 0);

        return 0;
}

static void spacc_aead_free_ddts(struct spacc_req *req)
{
        struct aead_request *areq = container_of(req->req, struct aead_request,
                                                 base);
        struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
        struct spacc_aead_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
        struct spacc_engine *engine = aead_ctx->generic.engine;
        unsigned ivsize = alg->alg.cra_aead.ivsize;
        unsigned nents = sg_count(areq->src, areq->cryptlen);

        if (areq->src != areq->dst) {
                dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
                dma_unmap_sg(engine->dev, areq->dst,
                             sg_count(areq->dst, areq->cryptlen),
                             DMA_FROM_DEVICE);
        } else
                dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

        dma_unmap_sg(engine->dev, areq->assoc,
                     sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);

        /* Unmap the IV/GIV with the same direction it was mapped with. */
        dma_unmap_single(engine->dev, req->giv_pa, ivsize,
                         req->giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

        dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
        dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}

static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
                           dma_addr_t ddt_addr, struct scatterlist *payload,
                           unsigned nbytes, enum dma_data_direction dir)
{
        unsigned nents = sg_count(payload, nbytes);

        dma_unmap_sg(req->engine->dev, payload, nents, dir);
        dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}

/*
 * Set key for a DES operation in an AEAD cipher. This also performs weak key
 * checking if required.
 */
static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
                                 unsigned int len)
{
        struct crypto_tfm *tfm = crypto_aead_tfm(aead);
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 tmp[DES_EXPKEY_WORDS];

        if (unlikely(!des_ekey(tmp, key)) &&
            (crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_WEAK_KEY)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }

        memcpy(ctx->cipher_key, key, len);
        ctx->cipher_key_len = len;

        return 0;
}

/* Set the key for the AES block cipher component of the AEAD transform. */
static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
                                 unsigned int len)
{
        struct crypto_tfm *tfm = crypto_aead_tfm(aead);
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        /*
         * IPSec engine only supports 128 and 256 bit AES keys. If we get a
         * request for any other size (192 bits) then we need to do a software
         * fallback.
         */
        if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
                /*
                 * Set the fallback transform to use the same request flags as
                 * the hardware transform.
                 */
                ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
                ctx->sw_cipher->base.crt_flags |=
                        tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
                return crypto_aead_setkey(ctx->sw_cipher, key, len);
        }

        memcpy(ctx->cipher_key, key, len);
        ctx->cipher_key_len = len;

        return 0;
}

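/*
 * The authenc key blob parsed below uses the standard crypto_authenc layout:
 * an rtattr header of type CRYPTO_AUTHENC_KEYA_PARAM carrying a
 * crypto_authenc_key_param (the big-endian encryption key length), followed
 * by the authentication key and then the encryption key:
 *
 *      | rtattr | enckeylen (be32) | auth key ... | enc key ... |
 */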
static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
        struct rtattr *rta = (void *)key;
        struct crypto_authenc_key_param *param;
        unsigned int authkeylen, enckeylen;
        int err = -EINVAL;

        if (!RTA_OK(rta, keylen))
                goto badkey;

        if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
                goto badkey;

        if (RTA_PAYLOAD(rta) < sizeof(*param))
                goto badkey;

        param = RTA_DATA(rta);
        enckeylen = be32_to_cpu(param->enckeylen);

        key += RTA_ALIGN(rta->rta_len);
        keylen -= RTA_ALIGN(rta->rta_len);

        if (keylen < enckeylen)
                goto badkey;

        authkeylen = keylen - enckeylen;

        if (enckeylen > AES_MAX_KEY_SIZE)
                goto badkey;

        if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
            SPA_CTRL_CIPH_ALG_AES)
                err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
        else
                err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);

        if (err)
                goto badkey;

        memcpy(ctx->hash_ctx, key, authkeylen);
        ctx->hash_key_len = authkeylen;

        return 0;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

static int spacc_aead_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

        ctx->auth_size = authsize;

        return 0;
}

/*
 * Check if an AEAD request requires a fallback operation. Some requests can't
 * be completed in hardware because the hardware may not support certain key
 * sizes. In these cases we need to complete the request in software.
 */
static int spacc_aead_need_fallback(struct spacc_req *req)
{
        struct aead_request *aead_req;
        struct crypto_tfm *tfm = req->req->tfm;
        struct crypto_alg *alg = req->req->tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        aead_req = container_of(req->req, struct aead_request, base);
        /*
         * If we have a non-supported key-length, then we need to do a
         * software fallback.
         */
        if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
            SPA_CTRL_CIPH_ALG_AES &&
            ctx->cipher_key_len != AES_KEYSIZE_128 &&
            ctx->cipher_key_len != AES_KEYSIZE_256)
                return 1;

        return 0;
}

static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
                                  bool is_encrypt)
{
        struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
        int err;

        if (ctx->sw_cipher) {
                /*
                 * Change the request to use the software fallback transform,
                 * and once the ciphering has completed, put the old transform
                 * back into the request.
                 */
                aead_request_set_tfm(req, ctx->sw_cipher);
                err = is_encrypt ? crypto_aead_encrypt(req) :
                    crypto_aead_decrypt(req);
                aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
        } else
                err = -EINVAL;

        return err;
}

static void spacc_aead_complete(struct spacc_req *req)
{
        spacc_aead_free_ddts(req);
        req->req->complete(req->req, req->result);
}

static int spacc_aead_submit(struct spacc_req *req)
{
        struct crypto_tfm *tfm = req->req->tfm;
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = req->req->tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);
        struct spacc_engine *engine = ctx->generic.engine;
        u32 ctrl, proc_len, assoc_len;
        struct aead_request *aead_req =
                container_of(req->req, struct aead_request, base);

        req->result = -EINPROGRESS;
        req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
                ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
                ctx->hash_ctx, ctx->hash_key_len);

        /* Set the source and destination DDT pointers. */
        writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
        writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
        writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

        assoc_len = aead_req->assoclen;
        proc_len = aead_req->cryptlen + assoc_len;

        /*
         * If we aren't generating an IV, then we need to include the IV in the
         * associated data so that it is included in the hash.
         */
        if (!req->giv) {
                assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
                proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
        } else
                proc_len += req->giv_len;

        /*
         * If we are decrypting, we need to take the length of the ICV out of
         * the processing length.
         */
        if (!req->is_encrypt)
                proc_len -= ctx->auth_size;

        writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
        writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
        writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
        writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
        writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

        ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
                (1 << SPA_CTRL_ICV_APPEND);
        if (req->is_encrypt)
                ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
        else
                ctrl |= (1 << SPA_CTRL_KEY_EXP);

        mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

        writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

        return -EINPROGRESS;
}

static int spacc_req_submit(struct spacc_req *req);

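/*
 * Drain the pending queue into the engine: keep submitting requests while
 * there is queued work and the engine has capacity for another in-flight
 * PDU. Called with the hw_lock held.
 */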
static void spacc_push(struct spacc_engine *engine)
{
        struct spacc_req *req;

        while (!list_empty(&engine->pending) &&
               engine->in_flight + 1 <= engine->fifo_sz) {

                ++engine->in_flight;
                req = list_first_entry(&engine->pending, struct spacc_req,
                                       list);
                list_move_tail(&req->list, &engine->in_progress);

                req->result = spacc_req_submit(req);
        }
}

/*
 * Set up an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 *
 * @giv Pointer to destination address for a generated IV. If the
 *      request does not need to generate an IV then this should be set to NULL.
 */
static int spacc_aead_setup(struct aead_request *req, u8 *giv,
                            unsigned alg_type, bool is_encrypt)
{
        struct crypto_alg *alg = req->base.tfm->__crt_alg;
        struct spacc_engine *engine = to_spacc_alg(alg)->engine;
        struct spacc_req *dev_req = aead_request_ctx(req);
        int err = -EINPROGRESS;
        unsigned long flags;
        unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

        dev_req->giv            = giv;
        dev_req->giv_len        = ivsize;
        dev_req->req            = &req->base;
        dev_req->is_encrypt     = is_encrypt;
        dev_req->result         = -EBUSY;
        dev_req->engine         = engine;
        dev_req->complete       = spacc_aead_complete;

        if (unlikely(spacc_aead_need_fallback(dev_req)))
                return spacc_aead_do_fallback(req, alg_type, is_encrypt);

        err = spacc_aead_make_ddts(dev_req, dev_req->giv);
        if (err)
                goto out;

        err = -EINPROGRESS;
        spin_lock_irqsave(&engine->hw_lock, flags);
        if (unlikely(spacc_fifo_cmd_full(engine)) ||
            engine->in_flight + 1 > engine->fifo_sz) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        spin_unlock_irqrestore(&engine->hw_lock, flags);
                        goto out_free_ddts;
                }
                list_add_tail(&dev_req->list, &engine->pending);
        } else {
                list_add_tail(&dev_req->list, &engine->pending);
                spacc_push(engine);
        }
        spin_unlock_irqrestore(&engine->hw_lock, flags);

        goto out;

out_free_ddts:
        spacc_aead_free_ddts(dev_req);
out:
        return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead);
        struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

        return spacc_aead_setup(req, NULL, alg->type, 1);
}

static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
{
        struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        size_t ivsize = crypto_aead_ivsize(tfm);
        struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
        unsigned len;
        __be64 seq;

        memcpy(req->areq.iv, ctx->salt, ivsize);
        len = ivsize;
        if (ivsize > sizeof(u64)) {
                memset(req->giv, 0, ivsize - sizeof(u64));
                len = sizeof(u64);
        }
        seq = cpu_to_be64(req->seq);
        memcpy(req->giv + ivsize - len, &seq, len);

        return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
}

static int spacc_aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead);
        struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

        return spacc_aead_setup(req, NULL, alg->type, 0);
}

/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_tfm *tfm)
{
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);
        struct spacc_engine *engine = spacc_alg->engine;

        ctx->generic.flags = spacc_alg->type;
        ctx->generic.engine = engine;
        ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
                                           CRYPTO_ALG_ASYNC |
                                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->sw_cipher)) {
                dev_warn(engine->dev, "failed to allocate fallback for %s\n",
                         alg->cra_name);
                ctx->sw_cipher = NULL;
        }
        ctx->generic.key_offs = spacc_alg->key_offs;
        ctx->generic.iv_offs = spacc_alg->iv_offs;

        get_random_bytes(ctx->salt, sizeof(ctx->salt));

        tfm->crt_aead.reqsize = sizeof(struct spacc_req);

        return 0;
}

/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
{
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->sw_cipher)
                crypto_free_aead(ctx->sw_cipher);
        ctx->sw_cipher = NULL;
}

/*
 * Set the DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                            unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 tmp[DES_EXPKEY_WORDS];

        if (len > DES3_EDE_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if (unlikely(!des_ekey(tmp, key)) &&
            (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }

        memcpy(ctx->key, key, len);
        ctx->key_len = len;

        return 0;
}

/*
 * Set the key for an AES block cipher. Some key lengths are not supported in
 * hardware so this must also check whether a fallback is needed.
 */
static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                            unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        int err = 0;

        if (len > AES_MAX_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        /*
         * IPSec engine only supports 128 and 256 bit AES keys. If we get a
         * request for any other size (192 bits) then we need to do a software
         * fallback.
         */
        if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
            ctx->sw_cipher) {
                /*
                 * Set the fallback transform to use the same request flags as
                 * the hardware transform.
                 */
                ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
                ctx->sw_cipher->base.crt_flags |=
                        cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;

                err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
                if (err)
                        goto sw_setkey_failed;
        } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
                   !ctx->sw_cipher)
                err = -EINVAL;

        memcpy(ctx->key, key, len);
        ctx->key_len = len;

sw_setkey_failed:
        if (err && ctx->sw_cipher) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |=
                        ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
        }

        return err;
}

static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
                                  const u8 *key, unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        int err = 0;

        if (len > AES_MAX_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                err = -EINVAL;
                goto out;
        }

        memcpy(ctx->key, key, len);
        ctx->key_len = len;

out:
        return err;
}

static int spacc_ablk_need_fallback(struct spacc_req *req)
{
        struct spacc_ablk_ctx *ctx;
        struct crypto_tfm *tfm = req->req->tfm;
        struct crypto_alg *alg = req->req->tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);

        ctx = crypto_tfm_ctx(tfm);

        return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
                        SPA_CTRL_CIPH_ALG_AES &&
                        ctx->key_len != AES_KEYSIZE_128 &&
                        ctx->key_len != AES_KEYSIZE_256;
}

static void spacc_ablk_complete(struct spacc_req *req)
{
        struct ablkcipher_request *ablk_req =
                container_of(req->req, struct ablkcipher_request, base);

        if (ablk_req->src != ablk_req->dst) {
                spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
                               ablk_req->nbytes, DMA_TO_DEVICE);
                spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
                               ablk_req->nbytes, DMA_FROM_DEVICE);
        } else
                spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
                               ablk_req->nbytes, DMA_BIDIRECTIONAL);

        req->req->complete(req->req, req->result);
}

static int spacc_ablk_submit(struct spacc_req *req)
{
        struct crypto_tfm *tfm = req->req->tfm;
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
        struct crypto_alg *alg = req->req->tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);
        struct spacc_engine *engine = ctx->generic.engine;
        u32 ctrl;

        req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
                ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
                NULL, 0);

        writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
        writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
        writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

        writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
        writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
        writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
        writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

        ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
                (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
                 (1 << SPA_CTRL_KEY_EXP));

        mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

        writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

        return -EINPROGRESS;
}

static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
                                  unsigned alg_type, bool is_encrypt)
{
        struct crypto_tfm *old_tfm =
            crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
        int err;

        if (!ctx->sw_cipher)
                return -EINVAL;

        /*
         * Change the request to use the software fallback transform, and once
         * the ciphering has completed, put the old transform back into the
         * request.
         */
        ablkcipher_request_set_tfm(req, ctx->sw_cipher);
        err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
                crypto_ablkcipher_decrypt(req);
        ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));

        return err;
}

static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
                            bool is_encrypt)
{
        struct crypto_alg *alg = req->base.tfm->__crt_alg;
        struct spacc_engine *engine = to_spacc_alg(alg)->engine;
        struct spacc_req *dev_req = ablkcipher_request_ctx(req);
        unsigned long flags;
        int err = -ENOMEM;

        dev_req->req            = &req->base;
        dev_req->is_encrypt     = is_encrypt;
        dev_req->engine         = engine;
        dev_req->complete       = spacc_ablk_complete;
        dev_req->result         = -EINPROGRESS;

        if (unlikely(spacc_ablk_need_fallback(dev_req)))
                return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

        /*
         * Create the DDTs for the engine. If the source and destination are
         * the same then we can optimize by reusing the DDTs.
         */
        if (req->src != req->dst) {
                dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
                        req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
                if (!dev_req->src_ddt)
                        goto out;

                dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
                        req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
                if (!dev_req->dst_ddt)
                        goto out_free_src;
        } else {
                dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
                        req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
                if (!dev_req->dst_ddt)
                        goto out;

                dev_req->src_ddt = NULL;
                dev_req->src_addr = dev_req->dst_addr;
        }

        err = -EINPROGRESS;
        spin_lock_irqsave(&engine->hw_lock, flags);
        /*
         * Check if the engine will accept the operation now. If it won't then
         * we either stick it on the end of a pending list if we can backlog,
         * or bail out with an error if not.
         */
        if (unlikely(spacc_fifo_cmd_full(engine)) ||
            engine->in_flight + 1 > engine->fifo_sz) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        spin_unlock_irqrestore(&engine->hw_lock, flags);
                        goto out_free_ddts;
                }
                list_add_tail(&dev_req->list, &engine->pending);
        } else {
                list_add_tail(&dev_req->list, &engine->pending);
                spacc_push(engine);
        }
        spin_unlock_irqrestore(&engine->hw_lock, flags);

        goto out;

out_free_ddts:
        spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
                       req->nbytes, req->src == req->dst ?
                       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
        if (req->src != req->dst)
                spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
                               req->src, req->nbytes, DMA_TO_DEVICE);
out:
        return err;
}

static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
{
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);
        struct spacc_engine *engine = spacc_alg->engine;

        ctx->generic.flags = spacc_alg->type;
        ctx->generic.engine = engine;
        if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
                ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(ctx->sw_cipher)) {
                        dev_warn(engine->dev, "failed to allocate fallback for %s\n",
                                 alg->cra_name);
                        ctx->sw_cipher = NULL;
                }
        }
        ctx->generic.key_offs = spacc_alg->key_offs;
        ctx->generic.iv_offs = spacc_alg->iv_offs;

        tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);

        return 0;
}

static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->sw_cipher)
                crypto_free_ablkcipher(ctx->sw_cipher);
        ctx->sw_cipher = NULL;
}

static int spacc_ablk_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

        return spacc_ablk_setup(req, alg->type, 1);
}

static int spacc_ablk_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

        return spacc_ablk_setup(req, alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
        return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
                SPA_FIFO_STAT_EMPTY;
}

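/*
 * Reap completed PDUs from the status FIFO: pop each status word, convert
 * the SPAcc result code into an errno and move the request onto the
 * completed list for the completion tasklet. Called from both the ISR and
 * the packet-timeout path.
 */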
static void spacc_process_done(struct spacc_engine *engine)
{
        struct spacc_req *req;
        unsigned long flags;

        spin_lock_irqsave(&engine->hw_lock, flags);

        while (!spacc_fifo_stat_empty(engine)) {
                req = list_first_entry(&engine->in_progress, struct spacc_req,
                                       list);
                list_move_tail(&req->list, &engine->completed);
                --engine->in_flight;

                /* POP the status register. */
                writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
                req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
                     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

                /*
                 * Convert the SPAcc error status into the standard POSIX error
                 * codes.
                 */
                if (unlikely(req->result)) {
                        switch (req->result) {
                        case SPA_STATUS_ICV_FAIL:
                                req->result = -EBADMSG;
                                break;

                        case SPA_STATUS_MEMORY_ERROR:
                                dev_warn(engine->dev,
                                         "memory error triggered\n");
                                req->result = -EFAULT;
                                break;

                        case SPA_STATUS_BLOCK_ERROR:
                                dev_warn(engine->dev,
                                         "block error triggered\n");
                                req->result = -EIO;
                                break;
                        }
                }
        }

        tasklet_schedule(&engine->complete);

        spin_unlock_irqrestore(&engine->hw_lock, flags);
}

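/* Engine IRQ: acknowledge the interrupt sources and reap completed PDUs. */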
static irqreturn_t spacc_spacc_irq(int irq, void *dev)
{
        struct spacc_engine *engine = (struct spacc_engine *)dev;
        u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);

        writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
        spacc_process_done(engine);

        return IRQ_HANDLED;
}

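/*
 * Timer callback used to reap PDUs that have completed without the STAT_CNT
 * interrupt firing (see the PACKET_TIMEOUT comment above).
 */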
static void spacc_packet_timeout(unsigned long data)
{
        struct spacc_engine *engine = (struct spacc_engine *)data;

        spacc_process_done(engine);
}

static int spacc_req_submit(struct spacc_req *req)
{
        struct crypto_alg *alg = req->req->tfm->__crt_alg;

        if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
                return spacc_aead_submit(req);
        else
                return spacc_ablk_submit(req);
}

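/*
 * Completion tasklet: splice off the completed list, push more pending work
 * into the engine and rearm the packet timer if PDUs are still in flight,
 * then run the completion callbacks outside of the hw_lock.
 */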
static void spacc_spacc_complete(unsigned long data)
{
        struct spacc_engine *engine = (struct spacc_engine *)data;
        struct spacc_req *req, *tmp;
        unsigned long flags;
        LIST_HEAD(completed);

        spin_lock_irqsave(&engine->hw_lock, flags);

        list_splice_init(&engine->completed, &completed);
        spacc_push(engine);
        if (engine->in_flight)
                mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

        spin_unlock_irqrestore(&engine->hw_lock, flags);

        list_for_each_entry_safe(req, tmp, &completed, list) {
                req->complete(req);
                list_del(&req->list);
        }
}

#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct spacc_engine *engine = platform_get_drvdata(pdev);

        /*
         * We only support standby mode. All we have to do is gate the clock to
         * the spacc. The hardware will preserve state until we turn it back
         * on again.
         */
        clk_disable(engine->clk);

        return 0;
}

static int spacc_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct spacc_engine *engine = platform_get_drvdata(pdev);

        return clk_enable(engine->clk);
}

static const struct dev_pm_ops spacc_pm_ops = {
        .suspend        = spacc_suspend,
        .resume         = spacc_resume,
};
#endif /* CONFIG_PM */

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
        return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
}

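/*
 * sysfs attribute pair exposing the STAT_CNT interrupt threshold; writes are
 * clamped to between 1 and the engine's FIFO size minus one.
 */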
static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct spacc_engine *engine = spacc_dev_to_engine(dev);

        return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
}

static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t len)
{
        struct spacc_engine *engine = spacc_dev_to_engine(dev);
        unsigned long thresh;

        if (strict_strtoul(buf, 0, &thresh))
                return -EINVAL;

        thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);

        engine->stat_irq_thresh = thresh;
        writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
               engine->regs + SPA_IRQ_CTRL_REG_OFFSET);

        return len;
}
static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
                   spacc_stat_irq_thresh_store);

static struct spacc_alg ipsec_engine_algs[] = {
        {
                .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
                .key_offs = 0,
                .iv_offs = AES_MAX_KEY_SIZE,
                .alg = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "cbc-aes-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_NEED_FALLBACK,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
                        .cra_type = &crypto_ablkcipher_type,
                        .cra_module = THIS_MODULE,
                        .cra_ablkcipher = {
                                .setkey = spacc_aes_setkey,
                                .encrypt = spacc_ablk_encrypt,
                                .decrypt = spacc_ablk_decrypt,
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                                .ivsize = AES_BLOCK_SIZE,
                        },
                        .cra_init = spacc_ablk_cra_init,
                        .cra_exit = spacc_ablk_cra_exit,
                },
        },
        {
                .key_offs = 0,
                .iv_offs = AES_MAX_KEY_SIZE,
                .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
                .alg = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "ecb-aes-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
                        .cra_type = &crypto_ablkcipher_type,
                        .cra_module = THIS_MODULE,
                        .cra_ablkcipher = {
                                .setkey = spacc_aes_setkey,
                                .encrypt = spacc_ablk_encrypt,
                                .decrypt = spacc_ablk_decrypt,
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                        },
                        .cra_init = spacc_ablk_cra_init,
                        .cra_exit = spacc_ablk_cra_exit,
                },
        },
        {
                .key_offs = DES_BLOCK_SIZE,
                .iv_offs = 0,
                .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
                .alg = {
                        .cra_name = "cbc(des)",
                        .cra_driver_name = "cbc-des-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
                        .cra_type = &crypto_ablkcipher_type,
                        .cra_module = THIS_MODULE,
                        .cra_ablkcipher = {
                                .setkey = spacc_des_setkey,
                                .encrypt = spacc_ablk_encrypt,
                                .decrypt = spacc_ablk_decrypt,
                                .min_keysize = DES_KEY_SIZE,
                                .max_keysize = DES_KEY_SIZE,
                                .ivsize = DES_BLOCK_SIZE,
                        },
                        .cra_init = spacc_ablk_cra_init,
                        .cra_exit = spacc_ablk_cra_exit,
                },
        },
        {
                .key_offs = DES_BLOCK_SIZE,
                .iv_offs = 0,
                .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
                .alg = {
                        .cra_name = "ecb(des)",
                        .cra_driver_name = "ecb-des-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
                        .cra_type = &crypto_ablkcipher_type,
                        .cra_module = THIS_MODULE,
                        .cra_ablkcipher = {
                                .setkey = spacc_des_setkey,
                                .encrypt = spacc_ablk_encrypt,
                                .decrypt = spacc_ablk_decrypt,
                                .min_keysize = DES_KEY_SIZE,
                                .max_keysize = DES_KEY_SIZE,
                        },
                        .cra_init = spacc_ablk_cra_init,
                        .cra_exit = spacc_ablk_cra_exit,
                },
        },
        {
                .key_offs = DES_BLOCK_SIZE,
                .iv_offs = 0,
                .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
                .alg = {
                        .cra_name = "cbc(des3_ede)",
                        .cra_driver_name = "cbc-des3-ede-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
                        .cra_type = &crypto_ablkcipher_type,
                        .cra_module = THIS_MODULE,
                        .cra_ablkcipher = {
                                .setkey = spacc_des_setkey,
                                .encrypt = spacc_ablk_encrypt,
                                .decrypt = spacc_ablk_decrypt,
                                .min_keysize = DES3_EDE_KEY_SIZE,
                                .max_keysize = DES3_EDE_KEY_SIZE,
                                .ivsize = DES3_EDE_BLOCK_SIZE,
                        },
                        .cra_init = spacc_ablk_cra_init,
                        .cra_exit = spacc_ablk_cra_exit,
                },
        },
        {
                .key_offs = DES_BLOCK_SIZE,
                .iv_offs = 0,
                .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
                .alg = {
                        .cra_name = "ecb(des3_ede)",
                        .cra_driver_name = "ecb-des3-ede-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
                        .cra_type = &crypto_ablkcipher_type,
                        .cra_module = THIS_MODULE,
                        .cra_ablkcipher = {
                                .setkey = spacc_des_setkey,
                                .encrypt = spacc_ablk_encrypt,
                                .decrypt = spacc_ablk_decrypt,
                                .min_keysize = DES3_EDE_KEY_SIZE,
                                .max_keysize = DES3_EDE_KEY_SIZE,
                        },
                        .cra_init = spacc_ablk_cra_init,
1462                         .cra_exit = spacc_ablk_cra_exit,
1463                 },
1464         },
1465         {
1466                 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1467                                 SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
1468                 .key_offs = 0,
1469                 .iv_offs = AES_MAX_KEY_SIZE,
1470                 .alg = {
1471                         .cra_name = "authenc(hmac(sha1),cbc(aes))",
1472                         .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
1473                         .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1474                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1475                         .cra_blocksize = AES_BLOCK_SIZE,
1476                         .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1477                         .cra_type = &crypto_aead_type,
1478                         .cra_module = THIS_MODULE,
1479                         .cra_aead = {
1480                                 .setkey = spacc_aead_setkey,
1481                                 .setauthsize = spacc_aead_setauthsize,
1482                                 .encrypt = spacc_aead_encrypt,
1483                                 .decrypt = spacc_aead_decrypt,
1484                                 .givencrypt = spacc_aead_givencrypt,
1485                                 .ivsize = AES_BLOCK_SIZE,
1486                                 .maxauthsize = SHA1_DIGEST_SIZE,
1487                         },
1488                         .cra_init = spacc_aead_cra_init,
1489                         .cra_exit = spacc_aead_cra_exit,
1490                 },
1491         },
1492         {
1493                 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1494                                 SPA_CTRL_HASH_ALG_SHA256 |
1495                                 SPA_CTRL_HASH_MODE_HMAC,
1496                 .key_offs = 0,
1497                 .iv_offs = AES_MAX_KEY_SIZE,
1498                 .alg = {
1499                         .cra_name = "authenc(hmac(sha256),cbc(aes))",
1500                         .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
1501                         .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1502                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1503                         .cra_blocksize = AES_BLOCK_SIZE,
1504                         .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1505                         .cra_type = &crypto_aead_type,
1506                         .cra_module = THIS_MODULE,
1507                         .cra_aead = {
1508                                 .setkey = spacc_aead_setkey,
1509                                 .setauthsize = spacc_aead_setauthsize,
1510                                 .encrypt = spacc_aead_encrypt,
1511                                 .decrypt = spacc_aead_decrypt,
1512                                 .givencrypt = spacc_aead_givencrypt,
1513                                 .ivsize = AES_BLOCK_SIZE,
1514                                 .maxauthsize = SHA256_DIGEST_SIZE,
1515                         },
1516                         .cra_init = spacc_aead_cra_init,
1517                         .cra_exit = spacc_aead_cra_exit,
1518                 },
1519         },
1520         {
1521                 .key_offs = 0,
1522                 .iv_offs = AES_MAX_KEY_SIZE,
1523                 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
1524                                 SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
1525                 .alg = {
1526                         .cra_name = "authenc(hmac(md5),cbc(aes))",
1527                         .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
1528                         .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1529                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1530                         .cra_blocksize = AES_BLOCK_SIZE,
1531                         .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1532                         .cra_type = &crypto_aead_type,
1533                         .cra_module = THIS_MODULE,
1534                         .cra_aead = {
1535                                 .setkey = spacc_aead_setkey,
1536                                 .setauthsize = spacc_aead_setauthsize,
1537                                 .encrypt = spacc_aead_encrypt,
1538                                 .decrypt = spacc_aead_decrypt,
1539                                 .givencrypt = spacc_aead_givencrypt,
1540                                 .ivsize = AES_BLOCK_SIZE,
1541                                 .maxauthsize = MD5_DIGEST_SIZE,
1542                         },
1543                         .cra_init = spacc_aead_cra_init,
1544                         .cra_exit = spacc_aead_cra_exit,
1545                 },
1546         },
1547         {
1548                 .key_offs = DES_BLOCK_SIZE,
1549                 .iv_offs = 0,
1550                 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
1551                                 SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
1552                 .alg = {
1553                         .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1554                         .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
1555                         .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1556                         .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1557                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1558                         .cra_ctxsize = sizeof(struct spacc_aead_ctx),
1559                         .cra_type = &crypto_aead_type,
1560                         .cra_module = THIS_MODULE,
1561                         .cra_aead = {
1562                                 .setkey = spacc_aead_setkey,
1563                                 .setauthsize = spacc_aead_setauthsize,
1564                                 .encrypt = spacc_aead_encrypt,
1565                                 .decrypt = spacc_aead_decrypt,
1566                                 .givencrypt = spacc_aead_givencrypt,
1567                                 .ivsize = DES3_EDE_BLOCK_SIZE,
1568                                 .maxauthsize = SHA1_DIGEST_SIZE,
1569                         },
1570                         .cra_init = spacc_aead_cra_init,
1571                         .cra_exit = spacc_aead_cra_exit,
1572                 },
1573         },
1574         {
1575                 .key_offs = DES_BLOCK_SIZE,
1576                 .iv_offs = 0,
                .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
                                SPA_CTRL_HASH_ALG_SHA256 |
                                SPA_CTRL_HASH_MODE_HMAC,
                .alg = {
                        .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
                        .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct spacc_aead_ctx),
                        .cra_type = &crypto_aead_type,
                        .cra_module = THIS_MODULE,
                        .cra_aead = {
                                .setkey = spacc_aead_setkey,
                                .setauthsize = spacc_aead_setauthsize,
                                .encrypt = spacc_aead_encrypt,
                                .decrypt = spacc_aead_decrypt,
                                .givencrypt = spacc_aead_givencrypt,
                                .ivsize = DES3_EDE_BLOCK_SIZE,
                                .maxauthsize = SHA256_DIGEST_SIZE,
                        },
                        .cra_init = spacc_aead_cra_init,
                        .cra_exit = spacc_aead_cra_exit,
                },
        },
        {
                .key_offs = DES_BLOCK_SIZE,
                .iv_offs = 0,
                .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
                                SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
                .alg = {
                        .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
                        .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct spacc_aead_ctx),
                        .cra_type = &crypto_aead_type,
                        .cra_module = THIS_MODULE,
                        .cra_aead = {
                                .setkey = spacc_aead_setkey,
                                .setauthsize = spacc_aead_setauthsize,
                                .encrypt = spacc_aead_encrypt,
                                .decrypt = spacc_aead_decrypt,
                                .givencrypt = spacc_aead_givencrypt,
                                .ivsize = DES3_EDE_BLOCK_SIZE,
                                .maxauthsize = MD5_DIGEST_SIZE,
                        },
                        .cra_init = spacc_aead_cra_init,
                        .cra_exit = spacc_aead_cra_exit,
                },
        },
};
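
/*
 * How these entries are consumed (illustrative sketch only, not part of
 * this driver): a user selects an algorithm by cra_name through the
 * generic crypto API, for example:
 *
 *      struct crypto_ablkcipher *tfm;
 *
 *      tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      if (!IS_ERR(tfm))
 *              crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * The high SPACC_CRYPTO_ALG_PRIORITY means this hardware implementation
 * is preferred over software implementations registered under the same
 * cra_name.
 */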

static struct spacc_alg l2_engine_algs[] = {
        {
                .key_offs = 0,
                .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
                .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
                                SPA_CTRL_CIPH_MODE_F8,
                .alg = {
                        .cra_name = "f8(kasumi)",
                        .cra_driver_name = "f8-kasumi-picoxcell",
                        .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
                        .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC,
                        .cra_blocksize = 8,
                        .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
                        .cra_type = &crypto_ablkcipher_type,
                        .cra_module = THIS_MODULE,
                        .cra_ablkcipher = {
                                .setkey = spacc_kasumi_f8_setkey,
                                .encrypt = spacc_ablk_encrypt,
                                .decrypt = spacc_ablk_decrypt,
                                .min_keysize = 16,
                                .max_keysize = 16,
                                .ivsize = 8,
                        },
                        .cra_init = spacc_ablk_cra_init,
                        .cra_exit = spacc_ablk_cra_exit,
                },
        },
};
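
/*
 * Note on key_offs/iv_offs in the tables above: they give the key and IV
 * positions within a hardware context page.  The AES entries store the key
 * at offset 0 with the IV after the largest possible key (AES_MAX_KEY_SIZE);
 * the DES/3DES entries store the IV first with the key one DES block
 * (DES_BLOCK_SIZE bytes) in; the KASUMI F8 entry stores its 16 byte key
 * first, followed by the 8 byte IV.
 */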

static int __devinit spacc_probe(struct platform_device *pdev,
                                 unsigned max_ctxs, size_t cipher_pg_sz,
                                 size_t hash_pg_sz, size_t fifo_sz,
                                 struct spacc_alg *algs, size_t num_algs)
{
        int i, err, ret = -EINVAL;
        struct resource *mem, *irq;
        struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
                                                   GFP_KERNEL);
        if (!engine)
                return -ENOMEM;

        engine->max_ctxs        = max_ctxs;
        engine->cipher_pg_sz    = cipher_pg_sz;
        engine->hash_pg_sz      = hash_pg_sz;
        engine->fifo_sz         = fifo_sz;
        engine->algs            = algs;
        engine->num_algs        = num_algs;
        engine->name            = dev_name(&pdev->dev);

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!mem || !irq) {
                dev_err(&pdev->dev, "no memory/irq resource for engine\n");
                return -ENXIO;
        }

        if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
                                     engine->name))
                return -ENOMEM;

        engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
        if (!engine->regs) {
                dev_err(&pdev->dev, "memory map failed\n");
                return -ENOMEM;
        }

        if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
                             engine->name, engine)) {
                dev_err(&pdev->dev, "failed to request IRQ\n");
                return -EBUSY;
        }

        engine->dev             = &pdev->dev;
        engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
        engine->hash_key_base   = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;

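        /*
         * Preallocate DMA pool buffers for the hardware DDT lists: each
         * allocation holds up to MAX_DDT_LEN spacc_ddt entries, is 8 byte
         * aligned and must not cross a 64KB boundary.
         */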
        engine->req_pool = dmam_pool_create(engine->name, engine->dev,
                MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
        if (!engine->req_pool)
                return -ENOMEM;

        spin_lock_init(&engine->hw_lock);

        engine->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(engine->clk)) {
                dev_info(&pdev->dev, "clk unavailable\n");
                return PTR_ERR(engine->clk);
        }

        if (clk_enable(engine->clk)) {
                dev_info(&pdev->dev, "unable to enable clk\n");
                clk_put(engine->clk);
                return -EIO;
        }

        err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
        if (err) {
                clk_disable(engine->clk);
                clk_put(engine->clk);
                return err;
        }

        /*
         * Use an IRQ threshold of 50% as a default. This seems to be a
         * reasonable trade-off of latency against throughput but can be
         * changed at runtime.
         */
        engine->stat_irq_thresh = (engine->fifo_sz / 2);
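        /* For example, a 32 entry FIFO gives a default threshold of 16. */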

        /*
         * Configure the interrupts. We only use the STAT_CNT interrupt as we
         * only submit a new packet for processing when we complete another in
         * the queue. This minimizes time spent in the interrupt handler.
         */
        writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
               engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
        writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
               engine->regs + SPA_IRQ_EN_REG_OFFSET);

        setup_timer(&engine->packet_timeout, spacc_packet_timeout,
                    (unsigned long)engine);

        INIT_LIST_HEAD(&engine->pending);
        INIT_LIST_HEAD(&engine->completed);
        INIT_LIST_HEAD(&engine->in_progress);
        engine->in_flight = 0;
        tasklet_init(&engine->complete, spacc_spacc_complete,
                     (unsigned long)engine);

        platform_set_drvdata(pdev, engine);

        INIT_LIST_HEAD(&engine->registered_algs);
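        /*
         * Register all of the algorithms this engine provides.  A single
         * registration failure is not fatal: the probe succeeds (ret becomes
         * 0) provided at least one algorithm registers.
         */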
        for (i = 0; i < engine->num_algs; ++i) {
                engine->algs[i].engine = engine;
                err = crypto_register_alg(&engine->algs[i].alg);
                if (!err) {
                        list_add_tail(&engine->algs[i].entry,
                                      &engine->registered_algs);
                        ret = 0;
                }
                if (err)
                        dev_err(engine->dev, "failed to register alg \"%s\"\n",
                                engine->algs[i].alg.cra_name);
                else
                        dev_dbg(engine->dev, "registered alg \"%s\"\n",
                                engine->algs[i].alg.cra_name);
        }

        return ret;
}

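/*
 * Tear the engine down: stop the packet timer, remove the sysfs attribute,
 * unregister every algorithm that probe registered, then release the clock.
 * The devm-managed resources (MMIO mapping, IRQ, DDT pool) are released
 * automatically on removal.
 */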
static int __devexit spacc_remove(struct platform_device *pdev)
{
        struct spacc_alg *alg, *next;
        struct spacc_engine *engine = platform_get_drvdata(pdev);

        del_timer_sync(&engine->packet_timeout);
        device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);

        list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
                list_del(&alg->entry);
                crypto_unregister_alg(&alg->alg);
        }

        clk_disable(engine->clk);
        clk_put(engine->clk);

        return 0;
}

static int __devinit ipsec_probe(struct platform_device *pdev)
{
        return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,
                           SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
                           SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
                           SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
                           ARRAY_SIZE(ipsec_engine_algs));
}

static struct platform_driver ipsec_driver = {
        .probe          = ipsec_probe,
        .remove         = __devexit_p(spacc_remove),
        .driver         = {
                .name   = "picoxcell-ipsec",
#ifdef CONFIG_PM
                .pm     = &spacc_pm_ops,
#endif /* CONFIG_PM */
        },
};
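
/*
 * Binding happens by platform device name.  As a sketch of what board
 * support code (outside this file) might provide:
 *
 *      static struct resource spacc_res[] = {
 *              { .start = ENGINE_BASE, .end = ENGINE_BASE + SZ_64K - 1,
 *                .flags = IORESOURCE_MEM, },
 *              { .start = ENGINE_IRQ, .end = ENGINE_IRQ,
 *                .flags = IORESOURCE_IRQ, },
 *      };
 *
 *      platform_device_register_simple("picoxcell-ipsec", -1, spacc_res,
 *                                      ARRAY_SIZE(spacc_res));
 *
 * ENGINE_BASE and ENGINE_IRQ are placeholders for the SoC-specific address
 * and interrupt.
 */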
static int __devinit l2_probe(struct platform_device *pdev)
{
        return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
                           SPACC_CRYPTO_L2_CIPHER_PG_SZ,
                           SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
                           l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
}

static struct platform_driver l2_driver = {
        .probe          = l2_probe,
        .remove         = __devexit_p(spacc_remove),
        .driver         = {
                .name   = "picoxcell-l2",
#ifdef CONFIG_PM
                .pm     = &spacc_pm_ops,
#endif /* CONFIG_PM */
        },
};

static int __init spacc_init(void)
{
        int ret = platform_driver_register(&ipsec_driver);
        if (ret) {
                pr_err("failed to register ipsec spacc driver\n");
                goto out;
        }

        ret = platform_driver_register(&l2_driver);
        if (ret) {
                pr_err("failed to register l2 spacc driver\n");
                goto l2_failed;
        }

        return 0;

l2_failed:
        platform_driver_unregister(&ipsec_driver);
out:
        return ret;
}
module_init(spacc_init);

static void __exit spacc_exit(void)
{
        platform_driver_unregister(&ipsec_driver);
        platform_driver_unregister(&l2_driver);
}
module_exit(spacc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");