2 * linux/net/sunrpc/gss_krb5_crypto.c
4 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
7 * Andy Adamson <andros@umich.edu>
8 * Bruce Fields <bfields@umich.edu>
12 * Copyright (C) 1998 by the FundsXpress, INC.
14 * All rights reserved.
16 * Export of this software from the United States of America may require
17 * a specific license from the United States Government. It is the
18 * responsibility of any person or organization contemplating export to
19 * obtain such a license before exporting.
21 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
22 * distribute this software and its documentation for any purpose and
23 * without fee is hereby granted, provided that the above copyright
24 * notice appear in all copies and that both that copyright notice and
25 * this permission notice appear in supporting documentation, and that
26 * the name of FundsXpress. not be used in advertising or publicity pertaining
27 * to distribution of the software without specific, written prior
28 * permission. FundsXpress makes no representations about the suitability of
29 * this software for any purpose. It is provided "as is" without express
30 * or implied warranty.
32 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
33 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
37 #include <linux/err.h>
38 #include <linux/types.h>
40 #include <linux/scatterlist.h>
41 #include <linux/crypto.h>
42 #include <linux/highmem.h>
43 #include <linux/pagemap.h>
44 #include <linux/random.h>
45 #include <linux/sunrpc/gss_krb5.h>
46 #include <linux/sunrpc/xdr.h>
49 # define RPCDBG_FACILITY RPCDBG_AUTH
/*
 * krb5_encrypt (partial view -- the signature line, error returns and
 * closing brace are elided from this chunk; original line numbers are
 * shown on each line).  In-place CBC encryption of a contiguous buffer:
 * copies @in to @out, wraps @out in a one-entry scatterlist, and
 * encrypts it with @tfm using @iv (staged into a local copy).
 */
54 struct crypto_blkcipher *tfm,
61 struct scatterlist sg[1];
/* IV staging buffer; zero IV is used when no caller IV is supplied. */
62 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
63 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
/* CBC requires whole blocks; reject ragged lengths (error path elided). */
65 if (length % crypto_blkcipher_blocksize(tfm) != 0)
68 if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
69 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
70 crypto_blkcipher_ivsize(tfm));
/* NOTE(review): presumably guarded by "if (iv)" on an elided line. */
75 memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
/* Encrypt in place over @out (sg source == sg dest). */
77 memcpy(out, in, length);
78 sg_init_one(sg, out, length);
80 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
82 dprintk("RPC: krb5_encrypt returns %d\n", ret);
/*
 * krb5_decrypt (partial view -- signature and error paths elided).
 * Mirror image of krb5_encrypt: copies @in to @out and CBC-decrypts
 * it in place via a one-entry scatterlist.
 */
88 struct crypto_blkcipher *tfm,
95 struct scatterlist sg[1];
96 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
97 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
/* Whole-block lengths only; IV must fit the staging buffer. */
99 if (length % crypto_blkcipher_blocksize(tfm) != 0)
102 if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
103 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
104 crypto_blkcipher_ivsize(tfm));
/* NOTE(review): presumably guarded by "if (iv)" on an elided line. */
108 memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
110 memcpy(out, in, length);
111 sg_init_one(sg, out, length);
113 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
115 dprintk("RPC: gss_k5decrypt returns %d\n",ret);
/*
 * checksummer - xdr_process_buf() callback: fold one scatterlist
 * fragment into the running hash whose state is carried in @data
 * (a struct hash_desc).
 */
120 checksummer(struct scatterlist *sg, void *data)
122 struct hash_desc *desc = data;
124 return crypto_hash_update(desc, sg, sg->length);
128 * checksum the plaintext data and hdrlen bytes of the token header
129 * The checksum is performed over the first 8 bytes of the
130 * gss token header and then over the data body
/*
 * make_checksum (partial view -- signature/return-type line, several
 * "if (err) goto out" error hops and the out: label are elided).
 * Hashes @hdrlen bytes of @header followed by the body of @body
 * starting at @body_offset, then copies the (possibly truncated)
 * digest into @cksumout per the enctype's checksum rules.
 */
133 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
134 struct xdr_buf *body, int body_offset, u8 *cksumkey,
135 unsigned int usage, struct xdr_netobj *cksumout)
137 struct hash_desc desc;
138 struct scatterlist sg[1];
140 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
141 unsigned int checksumlen;
/* Caller's buffer must hold the enctype's full checksum. */
143 if (cksumout->len < kctx->gk5e->cksumlength) {
144 dprintk("%s: checksum buffer length, %u, too small for %s\n",
145 __func__, cksumout->len, kctx->gk5e->name);
146 return GSS_S_FAILURE;
/* Allocate the hash tfm named by the enctype profile. */
149 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
150 if (IS_ERR(desc.tfm))
151 return GSS_S_FAILURE;
152 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
154 checksumlen = crypto_hash_digestsize(desc.tfm);
/* Key the hash only for keyed checksum types (HMAC); plain MD5 has no key. */
156 if (cksumkey != NULL) {
157 err = crypto_hash_setkey(desc.tfm, cksumkey,
158 kctx->gk5e->keylength);
163 err = crypto_hash_init(&desc);
/* First the token header bytes ... */
166 sg_init_one(sg, header, hdrlen);
167 err = crypto_hash_update(&desc, sg, hdrlen);
/* ... then every fragment of the xdr_buf body via checksummer(). */
170 err = xdr_process_buf(body, body_offset, body->len - body_offset,
174 err = crypto_hash_final(&desc, checksumdata);
178 switch (kctx->gk5e->ctype) {
179 case CKSUMTYPE_RSA_MD5:
/* Legacy DES: encrypt the MD5 digest, keep the trailing cksumlength bytes. */
180 err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
181 checksumdata, checksumlen);
184 memcpy(cksumout->data,
185 checksumdata + checksumlen - kctx->gk5e->cksumlength,
186 kctx->gk5e->cksumlength);
188 case CKSUMTYPE_HMAC_SHA1_DES3:
/* Truncate the HMAC to the enctype's checksum length. */
189 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
195 cksumout->len = kctx->gk5e->cksumlength;
197 crypto_free_hash(desc.tfm);
198 return err ? GSS_S_FAILURE : 0;
202 * checksum the plaintext data and hdrlen bytes of the token header
203 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
204 * body then over the first 16 octets of the MIC token
205 * Inclusion of the header data in the calculation of the
206 * checksum is optional.
/*
 * make_checksum_v2 (partial view -- signature/return-type line and
 * error-path hops elided).  RFC 4121 ordering: body first, then the
 * (optional) token header.  Always requires a keyed checksum and a key.
 */
209 make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
210 struct xdr_buf *body, int body_offset, u8 *cksumkey,
211 unsigned int usage, struct xdr_netobj *cksumout)
213 struct hash_desc desc;
214 struct scatterlist sg[1];
216 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
217 unsigned int checksumlen;
/* v2 tokens only use keyed checksums -- refuse anything else. */
219 if (kctx->gk5e->keyed_cksum == 0) {
220 dprintk("%s: expected keyed hash for %s\n",
221 __func__, kctx->gk5e->name);
222 return GSS_S_FAILURE;
224 if (cksumkey == NULL) {
225 dprintk("%s: no key supplied for %s\n",
226 __func__, kctx->gk5e->name);
227 return GSS_S_FAILURE;
230 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
232 if (IS_ERR(desc.tfm))
233 return GSS_S_FAILURE;
234 checksumlen = crypto_hash_digestsize(desc.tfm);
235 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
237 err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
241 err = crypto_hash_init(&desc);
/* Body first (RFC 4121 sec. 4.2.4) ... */
244 err = xdr_process_buf(body, body_offset, body->len - body_offset,
/* ... then the token header, if the caller included one. */
248 if (header != NULL) {
249 sg_init_one(sg, header, hdrlen);
250 err = crypto_hash_update(&desc, sg, hdrlen);
254 err = crypto_hash_final(&desc, checksumdata);
258 cksumout->len = kctx->gk5e->cksumlength;
260 switch (kctx->gk5e->ctype) {
261 case CKSUMTYPE_HMAC_SHA1_96_AES128:
262 case CKSUMTYPE_HMAC_SHA1_96_AES256:
263 /* note that this truncates the hash */
264 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
271 crypto_free_hash(desc.tfm);
272 return err ? GSS_S_FAILURE : 0;
/*
 * Per-call state threaded through encryptor() as xdr_process_buf()
 * walks an xdr_buf fragment by fragment.  Separate in/out scatterlists
 * let the cipher read plaintext from one set of pages while writing
 * ciphertext to another.  (Some members are elided from this view.)
 */
275 struct encryptor_desc {
276 u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
277 struct blkcipher_desc desc;
279 struct xdr_buf *outbuf;
281 struct scatterlist infrags[4];
282 struct scatterlist outfrags[4];
/*
 * encryptor (partial view -- signature/return line, some braces and
 * early returns elided).  xdr_process_buf() callback: accumulates up
 * to four fragments, encrypts every whole cipher block in place, and
 * carries any sub-block remainder into the next invocation.
 */
288 encryptor(struct scatterlist *sg, void *data)
290 struct encryptor_desc *desc = data;
291 struct xdr_buf *outbuf = desc->outbuf;
292 struct page *in_page;
293 int thislen = desc->fraglen + sg->length;
297 /* Worst case is 4 fragments: head, end of page 1, start
298 * of page 2, tail. Anything more is a bug. */
299 BUG_ON(desc->fragno > 3);
/* If this fragment lies in the page-data region, read plaintext from
 * the caller-supplied pages rather than the output buffer's pages. */
301 page_pos = desc->pos - outbuf->head[0].iov_len;
302 if (page_pos >= 0 && page_pos < outbuf->page_len) {
303 /* pages are not in place: */
304 int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
305 in_page = desc->pages[i];
307 in_page = sg_page(sg);
309 sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
311 sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
314 desc->fraglen += sg->length;
315 desc->pos += sg->length;
/* Bytes beyond the last whole cipher block -- deferred to next call. */
317 fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
323 sg_mark_end(&desc->infrags[desc->fragno - 1]);
324 sg_mark_end(&desc->outfrags[desc->fragno - 1]);
326 ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
327 desc->infrags, thislen);
/* Reset fragment tables, then re-seed slot 0 with the remainder. */
331 sg_init_table(desc->infrags, 4);
332 sg_init_table(desc->outfrags, 4);
335 sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
336 sg->offset + sg->length - fraglen);
337 desc->infrags[0] = desc->outfrags[0];
/* Input side of the remainder still points at the plaintext page. */
338 sg_assign_page(&desc->infrags[0], in_page);
340 desc->fraglen = fraglen;
/*
 * gss_encrypt_xdr_buf (partial view -- return type, some desc.*
 * initializers and the final return are elided).  Encrypts
 * buf[offset..len) in place by driving encryptor() over every
 * fragment; @pages supplies the cleartext source pages.
 */
349 gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
350 int offset, struct page **pages)
353 struct encryptor_desc desc;
/* CBC precondition: region must be whole cipher blocks. */
355 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
357 memset(desc.iv, 0, sizeof(desc.iv));
359 desc.desc.info = desc.iv;
367 sg_init_table(desc.infrags, 4);
368 sg_init_table(desc.outfrags, 4);
370 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
/*
 * State threaded through decryptor().  Decryption is purely in place,
 * so a single fragment table suffices (cf. struct encryptor_desc).
 * (Some members are elided from this view.)
 */
374 struct decryptor_desc {
375 u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
376 struct blkcipher_desc desc;
377 struct scatterlist frags[4];
/*
 * decryptor (partial view -- signature/return line and some braces
 * elided).  xdr_process_buf() callback mirroring encryptor(): gathers
 * fragments, decrypts whole cipher blocks in place, and carries any
 * sub-block remainder into the next call.
 */
383 decryptor(struct scatterlist *sg, void *data)
385 struct decryptor_desc *desc = data;
386 int thislen = desc->fraglen + sg->length;
389 /* Worst case is 4 fragments: head, end of page 1, start
390 * of page 2, tail. Anything more is a bug. */
391 BUG_ON(desc->fragno > 3);
392 sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
395 desc->fraglen += sg->length;
/* Remainder beyond the last whole block is deferred. */
397 fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
403 sg_mark_end(&desc->frags[desc->fragno - 1]);
405 ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
406 desc->frags, thislen);
/* Reset the table and seed slot 0 with the carried-over tail bytes. */
410 sg_init_table(desc->frags, 4);
413 sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
414 sg->offset + sg->length - fraglen);
416 desc->fraglen = fraglen;
/*
 * gss_decrypt_xdr_buf (partial view -- return type, the offset
 * parameter line and some desc.* initializers are elided).  Decrypts
 * buf[offset..len) in place by driving decryptor() over each fragment.
 */
425 gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
428 struct decryptor_desc desc;
431 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
433 memset(desc.iv, 0, sizeof(desc.iv));
435 desc.desc.info = desc.iv;
440 sg_init_table(desc.frags, 4);
442 return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
446 * This function makes the assumption that it was ultimately called
449 * The client auth_gss code moves any existing tail data into a
450 * separate page before calling gss_wrap.
451 * The server svcauth_gss code ensures that both the head and the
452 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
454 * Even with that guarantee, this function may be called more than
455 * once in the processing of gss_wrap(). The best we can do is
456 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
457 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
458 * At run-time we can verify that a single invocation of this
459 * function doesn't attempt to use more the RPC_MAX_AUTH_SIZE.
/*
 * xdr_extend_head (partial view -- return type and final return
 * elided): open a @shiftlen-byte gap at head offset @base by sliding
 * the rest of the head forward into the guaranteed slack space.
 */
463 xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
470 BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
471 BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
473 p = buf->head[0].iov_base + base;
/* memmove: source and destination overlap by design. */
475 memmove(p + shiftlen, p, buf->head[0].iov_len - base);
477 buf->head[0].iov_len += shiftlen;
478 buf->len += shiftlen;
/*
 * gss_krb5_cts_crypt (partial view -- return type, error hops and
 * final return elided).  Handles the final (<= two cipher blocks)
 * region for CTS-mode AES: pulls the bytes out of the xdr_buf into a
 * small stack buffer, en/decrypts them, and writes them back.
 */
484 gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
485 u32 offset, u8 *iv, struct page **pages, int encrypt)
488 struct scatterlist sg[1];
489 struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
/* NOTE(review): VLA sized by runtime blocksize -- at most 2 blocks. */
490 u8 data[crypto_blkcipher_blocksize(cipher) * 2];
491 struct page **save_pages;
492 u32 len = buf->len - offset;
494 BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
497 * For encryption, we want to read from the cleartext
498 * page cache pages, and write the encrypted data to
499 * the supplied xdr_buf pages.
/* Temporarily swap in the cleartext pages for the read, then restore. */
501 save_pages = buf->pages;
505 ret = read_bytes_from_xdr_buf(buf, offset, data, len);
506 buf->pages = save_pages;
510 sg_init_one(sg, data, len);
/* NOTE(review): the if/else around these two calls is elided here. */
513 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
515 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
520 ret = write_bytes_to_xdr_buf(buf, offset, data, len);
/*
 * gss_krb5_aes_encrypt (partial view -- return type, several local
 * declarations, error hops and the final return are elided).
 * RFC 4121 wrap for the AES enctypes: insert a confounder, append the
 * EC filler and a copy of the token header, HMAC the plaintext, then
 * encrypt the bulk with CBC and the final partial region with CTS.
 */
527 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
528 struct xdr_buf *buf, int ec, struct page **pages)
531 struct xdr_netobj hmac;
534 struct crypto_blkcipher *cipher, *aux_cipher;
536 struct page **save_pages;
538 struct encryptor_desc desc;
/* Pick keys/usages by direction (initiator vs. acceptor). */
542 if (kctx->initiate) {
543 cipher = kctx->initiator_enc;
544 aux_cipher = kctx->initiator_enc_aux;
545 cksumkey = kctx->initiator_integ;
546 usage = KG_USAGE_INITIATOR_SEAL;
548 cipher = kctx->acceptor_enc;
549 aux_cipher = kctx->acceptor_enc_aux;
550 cksumkey = kctx->acceptor_integ;
551 usage = KG_USAGE_ACCEPTOR_SEAL;
553 blocksize = crypto_blkcipher_blocksize(cipher);
555 /* hide the gss token header and insert the confounder */
556 offset += GSS_KRB5_TOK_HDR_LEN;
557 if (xdr_extend_head(buf, offset, blocksize))
558 return GSS_S_FAILURE;
559 gss_krb5_make_confounder(buf->head[0].iov_base + offset, blocksize);
560 offset -= GSS_KRB5_TOK_HDR_LEN;
/* Locate (or create) the tail so EC filler and header copy can go there. */
562 if (buf->tail[0].iov_base != NULL) {
563 ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
565 buf->tail[0].iov_base = buf->head[0].iov_base
566 + buf->head[0].iov_len;
567 buf->tail[0].iov_len = 0;
568 ecptr = buf->tail[0].iov_base;
/* EC "extra count" filler bytes (value is arbitrary per RFC 4121). */
571 memset(ecptr, 'X', ec);
572 buf->tail[0].iov_len += ec;
575 /* copy plaintext gss token header after filler (if any) */
576 memcpy(ecptr + ec, buf->head[0].iov_base + offset,
577 GSS_KRB5_TOK_HDR_LEN);
578 buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
579 buf->len += GSS_KRB5_TOK_HDR_LEN;
/* HMAC lands immediately after the copied header in the tail. */
582 hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
583 hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
586 * When we are called, pages points to the real page cache
587 * data -- which we can't go and encrypt! buf->pages points
588 * to scratch pages which we are going to send off to the
589 * client/server. Swap in the plaintext pages to calculate
592 save_pages = buf->pages;
595 err = make_checksum_v2(kctx, NULL, 0, buf,
596 offset + GSS_KRB5_TOK_HDR_LEN,
597 cksumkey, usage, &hmac);
598 buf->pages = save_pages;
600 return GSS_S_FAILURE;
/* Split the region: all-but-last-two blocks go through plain CBC
 * (aux_cipher); the remainder goes through CTS in gss_krb5_cts_crypt. */
602 nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
603 nblocks = (nbytes + blocksize - 1) / blocksize;
606 cbcbytes = (nblocks - 2) * blocksize;
608 memset(desc.iv, 0, sizeof(desc.iv));
611 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
616 desc.desc.info = desc.iv;
618 desc.desc.tfm = aux_cipher;
620 sg_init_table(desc.infrags, 4);
621 sg_init_table(desc.outfrags, 4);
623 err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
624 cbcbytes, encryptor, &desc);
629 /* Make sure IV carries forward from any CBC results. */
630 err = gss_krb5_cts_crypt(cipher, buf,
631 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
638 /* Now update buf to account for HMAC */
639 buf->tail[0].iov_len += kctx->gk5e->cksumlength;
640 buf->len += kctx->gk5e->cksumlength;
/*
 * gss_krb5_aes_decrypt (partial view -- several declarations, error
 * hops, and the function's tail BEYOND the last visible line are
 * elided).  Reverse of gss_krb5_aes_encrypt: CBC-decrypt the bulk,
 * CTS-decrypt the remainder, recompute the HMAC over the recovered
 * plaintext and compare it against the packet's trailing checksum.
 */
649 gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
650 u32 *headskip, u32 *tailskip)
652 struct xdr_buf subbuf;
655 struct crypto_blkcipher *cipher, *aux_cipher;
656 struct xdr_netobj our_hmac_obj;
657 u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
658 u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
659 int nblocks, blocksize, cbcbytes;
660 struct decryptor_desc desc;
/* Direction is inverted relative to encrypt: an initiator decrypts
 * with the acceptor's keys and vice versa. */
663 if (kctx->initiate) {
664 cipher = kctx->acceptor_enc;
665 aux_cipher = kctx->acceptor_enc_aux;
666 cksum_key = kctx->acceptor_integ;
667 usage = KG_USAGE_ACCEPTOR_SEAL;
669 cipher = kctx->initiator_enc;
670 aux_cipher = kctx->initiator_enc_aux;
671 cksum_key = kctx->initiator_integ;
672 usage = KG_USAGE_INITIATOR_SEAL;
674 blocksize = crypto_blkcipher_blocksize(cipher);
677 /* create a segment skipping the header and leaving out the checksum */
678 xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
679 (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
680 kctx->gk5e->cksumlength));
/* CBC for all but the last two blocks, CTS for the remainder. */
682 nblocks = (subbuf.len + blocksize - 1) / blocksize;
686 cbcbytes = (nblocks - 2) * blocksize;
688 memset(desc.iv, 0, sizeof(desc.iv));
693 desc.desc.info = desc.iv;
695 desc.desc.tfm = aux_cipher;
697 sg_init_table(desc.frags, 4);
699 ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
704 /* Make sure IV carries forward from any CBC results. */
705 ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
710 /* Calculate our hmac over the plaintext data */
711 our_hmac_obj.len = sizeof(our_hmac);
712 our_hmac_obj.data = our_hmac;
714 ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
715 cksum_key, usage, &our_hmac_obj);
719 /* Get the packet's hmac value */
720 ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
721 pkt_hmac, kctx->gk5e->cksumlength);
/* Integrity check: mismatch means a bad signature (GSS_S_BAD_SIG). */
725 if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
/* Tell the caller how much to skip: confounder in front, HMAC behind. */
729 *headskip = crypto_blkcipher_blocksize(cipher);
730 *tailskip = kctx->gk5e->cksumlength;
732 if (ret && ret != GSS_S_BAD_SIG)