/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"
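
/*
 * Per-transform state: input is accumulated in a one-page buffer
 * ("data"/"used") so the PadLock engine can hash the whole message in
 * a single pass; "bypass" is set once the message outgrows the buffer,
 * after which everything is routed to the software fallback instead.
 */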
struct padlock_sha_ctx {
        char *data;
        size_t used;
        int bypass;
        void (*f_sha_padlock)(const char *in, char *out, int count);
        struct shash_desc *fallback;
};

static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
        return crypto_tfm_ctx(tfm);
}

/* We'll need an aligned address on the stack. */
#define NEAREST_ALIGNED(ptr) \
        ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))

static struct crypto_alg sha1_alg, sha256_alg;
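
/*
 * Switch a transform to the software fallback: initialize the fallback
 * shash and replay whatever has been buffered so far. This happens when
 * the message exceeds PAGE_SIZE, since the engine is only ever fed
 * complete messages from the one-page buffer.
 */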
static int padlock_sha_bypass(struct crypto_tfm *tfm)
{
        int err = 0;

        if (ctx(tfm)->bypass)
                goto out;

        err = crypto_shash_init(ctx(tfm)->fallback);
        if (err)
                goto out;

        if (ctx(tfm)->data && ctx(tfm)->used)
                err = crypto_shash_update(ctx(tfm)->fallback, ctx(tfm)->data,
                                          ctx(tfm)->used);

        ctx(tfm)->used = 0;
        ctx(tfm)->bypass = 1;

out:
        return err;
}

static void padlock_sha_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->used = 0;
        ctx(tfm)->bypass = 0;
}

static void padlock_sha_update(struct crypto_tfm *tfm,
                        const uint8_t *data, unsigned int length)
{
        int err;

        /* Our buffer is always one page. */
        if (unlikely(!ctx(tfm)->bypass &&
                        (ctx(tfm)->used + length > PAGE_SIZE))) {
                err = padlock_sha_bypass(tfm);
                BUG_ON(err);
        }

        if (unlikely(ctx(tfm)->bypass)) {
                err = crypto_shash_update(ctx(tfm)->fallback, data, length);
                BUG_ON(err);
                return;
        }

        memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
        ctx(tfm)->used += length;
}

static inline void padlock_output_block(uint32_t *src,
                        uint32_t *dst, size_t count)
{
        while (count--)
                *dst++ = swab32(*src++);
}
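
/*
 * The PadLock engine leaves the digest as CPU-endian (little-endian)
 * 32-bit words; padlock_output_block() byte-swaps each word into the
 * big-endian order the SHA specifications require.
 *
 * The engine is driven by "rep xsha1"/"rep xsha256", emitted as raw
 * opcode bytes for the benefit of old assemblers: ESI points at the
 * input, EDI at the digest buffer, ECX holds the byte count, and
 * EAX == 0 asks the engine to apply the final padding itself.
 */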
static void padlock_do_sha1(const char *in, char *out, int count)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
        char buf[128 + 16];
        char *result = NEAREST_ALIGNED(buf);
        int ts_state;

        ((uint32_t *)result)[0] = SHA1_H0;
        ((uint32_t *)result)[1] = SHA1_H1;
        ((uint32_t *)result)[2] = SHA1_H2;
        ((uint32_t *)result)[3] = SHA1_H3;
        ((uint32_t *)result)[4] = SHA1_H4;

        /* Prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
                      : "+S"(in), "+D"(result)
                      : "c"(count), "a"(0));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}

static void padlock_do_sha256(const char *in, char *out, int count)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         *     PadLock microcode needs it that big. */
        char buf[128 + 16];
        char *result = NEAREST_ALIGNED(buf);
        int ts_state;

        ((uint32_t *)result)[0] = SHA256_H0;
        ((uint32_t *)result)[1] = SHA256_H1;
        ((uint32_t *)result)[2] = SHA256_H2;
        ((uint32_t *)result)[3] = SHA256_H3;
        ((uint32_t *)result)[4] = SHA256_H4;
        ((uint32_t *)result)[5] = SHA256_H5;
        ((uint32_t *)result)[6] = SHA256_H6;
        ((uint32_t *)result)[7] = SHA256_H7;

        /* Prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
                      : "+S"(in), "+D"(result)
                      : "c"(count), "a"(0));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}
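
/*
 * Finalization: a bypassed transform finishes in the software shash;
 * otherwise the complete buffered message goes to the engine in one call.
 */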
static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
        int err;

        if (unlikely(ctx(tfm)->bypass)) {
                err = crypto_shash_final(ctx(tfm)->fallback, out);
                BUG_ON(err);
                ctx(tfm)->bypass = 0;
                return;
        }

        /* Pass the input buffer to PadLock microcode... */
        ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

        ctx(tfm)->used = 0;
}
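
/*
 * Instance setup: allocate the one-page buffer and a software fallback
 * shash. The fallback is looked up by the generic algorithm name
 * (cra_name, e.g. "sha1"), with CRYPTO_ALG_NEED_FALLBACK as the lookup
 * mask so the PadLock implementations themselves are never selected.
 */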
static int padlock_cra_init(struct crypto_tfm *tfm)
{
        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
        struct crypto_shash *fallback_tfm;
        int err = -ENOMEM;

        /* For now we'll allocate one page. This
         * could eventually be configurable one day. */
        ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
        if (!ctx(tfm)->data)
                goto out;

        /* Allocate a fallback and abort if it failed. */
        fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
                goto out_free_page;
        }

        ctx(tfm)->fallback = kmalloc(sizeof(struct shash_desc) +
                                     crypto_shash_descsize(fallback_tfm),
                                     GFP_KERNEL);
        if (!ctx(tfm)->fallback)
                goto out_free_tfm;

        ctx(tfm)->fallback->tfm = fallback_tfm;
        ctx(tfm)->fallback->flags = 0;
        return 0;

out_free_tfm:
        crypto_free_shash(fallback_tfm);
out_free_page:
        free_page((unsigned long)(ctx(tfm)->data));
out:
        return err;
}
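
/* Per-algorithm init hooks: select the engine routine, then do the common setup. */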
static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->f_sha_padlock = padlock_do_sha1;

        return padlock_cra_init(tfm);
}

static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
        ctx(tfm)->f_sha_padlock = padlock_do_sha256;

        return padlock_cra_init(tfm);
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
        if (ctx(tfm)->data) {
                free_page((unsigned long)(ctx(tfm)->data));
                ctx(tfm)->data = NULL;
        }

        crypto_free_shash(ctx(tfm)->fallback->tfm);

        kzfree(ctx(tfm)->fallback);
}
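
/*
 * CRYPTO_ALG_NEED_FALLBACK in cra_flags marks these algorithms as
 * requiring a software fallback, which also keeps them out of the
 * fallback lookup done in padlock_cra_init() above.
 */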
static struct crypto_alg sha1_alg = {
        .cra_name               = "sha1",
        .cra_driver_name        = "sha1-padlock",
        .cra_priority           = PADLOCK_CRA_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_DIGEST |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = SHA1_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct padlock_sha_ctx),
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(sha1_alg.cra_list),
        .cra_init               = padlock_sha1_cra_init,
        .cra_exit               = padlock_cra_exit,
        .cra_u                  = {
                .digest = {
                        .dia_digestsize = SHA1_DIGEST_SIZE,
                        .dia_init       = padlock_sha_init,
                        .dia_update     = padlock_sha_update,
                        .dia_final      = padlock_sha_final,
                }
        }
};

static struct crypto_alg sha256_alg = {
        .cra_name               = "sha256",
        .cra_driver_name        = "sha256-padlock",
        .cra_priority           = PADLOCK_CRA_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_DIGEST |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = SHA256_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct padlock_sha_ctx),
        .cra_module             = THIS_MODULE,
        .cra_list               = LIST_HEAD_INIT(sha256_alg.cra_list),
        .cra_init               = padlock_sha256_cra_init,
        .cra_exit               = padlock_cra_exit,
        .cra_u                  = {
                .digest = {
                        .dia_digestsize = SHA256_DIGEST_SIZE,
                        .dia_init       = padlock_sha_init,
                        .dia_update     = padlock_sha_update,
                        .dia_final      = padlock_sha_final,
                }
        }
};
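
/*
 * cpu_has_phe / cpu_has_phe_enabled test the VIA CPUID feature bits for
 * the PadLock Hash Engine: present on the CPU, and actually enabled
 * (some firmware leaves the engine switched off).
 */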
static int __init padlock_init(void)
{
        int rc = -ENODEV;

        if (!cpu_has_phe) {
                printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
                return -ENODEV;
        }

        if (!cpu_has_phe_enabled) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        rc = crypto_register_alg(&sha1_alg);
        if (rc)
                goto out;

        rc = crypto_register_alg(&sha256_alg);
        if (rc)
                goto out_unreg1;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

        return 0;

out_unreg1:
        crypto_unregister_alg(&sha1_alg);
out:
        printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
        return rc;
}

static void __exit padlock_fini(void)
{
        crypto_unregister_alg(&sha1_alg);
        crypto_unregister_alg(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
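
/* Aliases under which the module can be auto-loaded on request. */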
MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");