Merge branch 'nfs-for-2.6.35' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index a1a3585..2763e3e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -168,6 +168,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
        struct page             **tmp_pages;
        u32                     seq_send;
        u8                      *cksumkey;
+       u32                     conflen = kctx->gk5e->conflen;
 
        dprintk("RPC:       %s\n", __func__);
 
@@ -176,7 +177,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
        blocksize = crypto_blkcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
-       plainlen = blocksize + buf->len - offset;
+       plainlen = conflen + buf->len - offset;
 
        headlen = g_token_size(&kctx->mech_used,
                GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
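
The blocksize-to-conflen switch above is the core of the sizing fix. For
des-cbc-raw and des3-cbc-raw the 8-byte confounder happens to be exactly one
cipher block, so reserving blocksize bytes worked by coincidence; arcfour-hmac
is a stream cipher whose block size is 1, yet it still prepends an 8-byte
confounder. A minimal standalone sketch of the arithmetic, with geometry
values assumed from the enctype tables this series introduces:

    #include <stdio.h>

    /* Hypothetical stand-ins for the gk5e geometry fields; the values
     * are assumptions taken from the des-cbc-raw and arcfour-hmac
     * enctype definitions. */
    struct enctype_geom {
            const char *name;
            int blocksize;          /* cipher block size */
            int conflen;            /* confounder length */
    };

    int main(void)
    {
            const struct enctype_geom e[] = {
                    { "des-cbc-raw",  8, 8 },   /* conflen == blocksize */
                    { "arcfour-hmac", 1, 8 },   /* stream cipher */
            };
            int padded = 24;        /* buf->len - offset, already padded */
            int i;

            for (i = 0; i < 2; i++)
                    printf("%s: plainlen = %d, not %d\n", e[i].name,
                           e[i].conflen + padded, e[i].blocksize + padded);
            return 0;
    }
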
@@ -204,7 +205,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
        memset(ptr + 4, 0xff, 4);
        *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
 
-       gss_krb5_make_confounder(msg_start, blocksize);
+       gss_krb5_make_confounder(msg_start, conflen);
 
        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
@@ -214,8 +215,8 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
        /* XXXJBF: UGH!: */
        tmp_pages = buf->pages;
        buf->pages = pages;
-       if (make_checksum(kctx, ptr, 8, buf, offset + headlen - blocksize,
-                                       cksumkey, &md5cksum))
+       if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
+                                       cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;
        buf->pages = tmp_pages;
 
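
make_checksum() also grows an explicit key-usage argument in this hunk. Keyed
checksums such as arcfour-hmac's HMAC-MD5 mix a usage number into the checksum
key so that a key derived for sealing cannot double as a signing key, and wrap
tokens checksum under the seal usage. A sketch of the usage numbers, which I
believe mirror MIT krb5's KG_USAGE_* values (treat the exact spellings and
values as assumptions):

    /* Key-usage numbers for the v1 token routines; believed to match
     * KG_USAGE_* in the kernel's gss_krb5.h, which follows MIT krb5. */
    enum kg_usage {
            KG_USAGE_SEAL = 22,     /* wrap-token checksum and encryption */
            KG_USAGE_SIGN = 23,     /* MIC-token checksum */
            KG_USAGE_SEQ  = 24,     /* sequence-number encryption */
    };
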
@@ -227,13 +228,30 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
        /* XXX would probably be more efficient to compute checksum
         * and encrypt at the same time: */
-       if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
+       if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
                               seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
                return GSS_S_FAILURE;
 
-       if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
-                                                                       pages))
-               return GSS_S_FAILURE;
+       if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
+               struct crypto_blkcipher *cipher;
+               int err;
+               cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+                                               CRYPTO_ALG_ASYNC);
+               if (IS_ERR(cipher))
+                       return GSS_S_FAILURE;
+
+               krb5_rc4_setup_enc_key(kctx, cipher, seq_send);
+
+               err = gss_encrypt_xdr_buf(cipher, buf,
+                                         offset + headlen - conflen, pages);
+               crypto_free_blkcipher(cipher);
+               if (err)
+                       return GSS_S_FAILURE;
+       } else {
+               if (gss_encrypt_xdr_buf(kctx->enc, buf,
+                                       offset + headlen - conflen, pages))
+                       return GSS_S_FAILURE;
+       }
 
        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
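
The new ENCTYPE_ARCFOUR_HMAC branch above cannot reuse the long-lived
kctx->enc cipher because RFC 4757 keys RC4 afresh for every message from the
session key and the send sequence number; hence the throwaway blkcipher keyed
via krb5_rc4_setup_enc_key(kctx, cipher, seq_send). A userspace sketch of that
derivation as RFC 4757 describes it, using OpenSSL's HMAC-MD5 purely for
illustration (build with -lcrypto; Ksess and the 16-byte key length stand in
for kctx->Ksess and the rc4-hmac key size):

    #include <stdint.h>
    #include <openssl/evp.h>
    #include <openssl/hmac.h>

    #define RC4_KEYLEN 16

    /* Per-message RC4 encryption key per RFC 4757:
     *   Klocal[i] = Ksess[i] ^ 0xF0
     *   Kcrypt    = HMAC-MD5(Klocal, 0x00000000)
     *   Kcrypt    = HMAC-MD5(Kcrypt, big-endian seqnum)
     */
    static void rc4_msg_key(const uint8_t Ksess[RC4_KEYLEN], uint32_t seqnum,
                            uint8_t Kcrypt[RC4_KEYLEN])
    {
            uint8_t Klocal[RC4_KEYLEN], tmp[RC4_KEYLEN];
            const uint8_t zero[4] = { 0, 0, 0, 0 };
            uint8_t seq[4];
            unsigned int len;
            int i;

            for (i = 0; i < RC4_KEYLEN; i++)
                    Klocal[i] = Ksess[i] ^ 0xf0;

            HMAC(EVP_md5(), Klocal, RC4_KEYLEN, zero, 4, tmp, &len);

            seq[0] = seqnum >> 24;
            seq[1] = seqnum >> 16;
            seq[2] = seqnum >> 8;
            seq[3] = seqnum;
            HMAC(EVP_md5(), tmp, RC4_KEYLEN, seq, 4, Kcrypt, &len);
    }

Because the key changes with every sequence number, the unwrap path has to
recover the sequence number before it can decrypt, which is what the next
hunks rearrange.
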
@@ -254,6 +272,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
        void                    *data_start, *orig_start;
        int                     data_len;
        int                     blocksize;
+       u32                     conflen = kctx->gk5e->conflen;
        int                     crypt_offset;
        u8                      *cksumkey;
 
@@ -289,8 +308,37 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
         */
        crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
                                        (unsigned char *)buf->head[0].iov_base;
-       if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
-               return GSS_S_DEFECTIVE_TOKEN;
+
+       /*
+        * Need plaintext seqnum to derive encryption key for arcfour-hmac
+        */
+       if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
+                            ptr + 8, &direction, &seqnum))
+               return GSS_S_BAD_SIG;
+
+       if ((kctx->initiate && direction != 0xff) ||
+           (!kctx->initiate && direction != 0))
+               return GSS_S_BAD_SIG;
+
+       if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
+               struct crypto_blkcipher *cipher;
+               int err;
+
+               cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
+                                               CRYPTO_ALG_ASYNC);
+               if (IS_ERR(cipher))
+                       return GSS_S_FAILURE;
+
+               krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
+
+               err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
+               crypto_free_blkcipher(cipher);
+               if (err)
+                       return GSS_S_DEFECTIVE_TOKEN;
+       } else {
+               if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
+                       return GSS_S_DEFECTIVE_TOKEN;
+       }
 
        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
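
Moving krb5_get_seq_num() ahead of the decryption is what makes that recovery
possible: the payload key depends on the plaintext sequence number, but the
sequence number itself is encrypted under a key that needs only the session
key and the checksum bytes already visible in the token header (SND_SEQ sits
at ptr + 8, SGN_CKSUM at ptr + GSS_KRB5_TOK_HDR_LEN). A companion sketch to
the one above, same assumptions:

    /* Sequence-number key per RFC 4757; derivable before the payload
     * is decrypted:
     *   Kseq = HMAC-MD5(Ksess, 0x00000000)
     *   Kseq = HMAC-MD5(Kseq, SGN_CKSUM)
     * RC4(Kseq) then decrypts the 8-byte SND_SEQ field. */
    static void rc4_seq_key(const uint8_t Ksess[RC4_KEYLEN],
                            const uint8_t cksum[8],
                            uint8_t Kseq[RC4_KEYLEN])
    {
            const uint8_t zero[4] = { 0, 0, 0, 0 };
            uint8_t tmp[RC4_KEYLEN];
            unsigned int len;

            HMAC(EVP_md5(), Ksess, RC4_KEYLEN, zero, 4, tmp, &len);
            HMAC(EVP_md5(), tmp, RC4_KEYLEN, cksum, 8, Kseq, &len);
    }
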
@@ -298,7 +346,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
                cksumkey = NULL;
 
        if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
-                                               cksumkey, &md5cksum))
+                                       cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;
 
        if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
@@ -314,20 +362,12 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 
        /* do sequencing checks */
 
-       if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
-                                   &direction, &seqnum))
-               return GSS_S_BAD_SIG;
-
-       if ((kctx->initiate && direction != 0xff) ||
-           (!kctx->initiate && direction != 0))
-               return GSS_S_BAD_SIG;
-
        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */
 
        blocksize = crypto_blkcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
-                                       blocksize;
+                                       conflen;
        orig_start = buf->head[0].iov_base + offset;
        data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
        memmove(orig_start, data_start, data_len);
@@ -519,6 +559,7 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
                BUG();
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
+       case ENCTYPE_ARCFOUR_HMAC:
                return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
@@ -536,6 +577,7 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
                BUG();
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
+       case ENCTYPE_ARCFOUR_HMAC:
                return gss_unwrap_kerberos_v1(kctx, offset, buf);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
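
Both dispatchers route ENCTYPE_ARCFOUR_HMAC to the v1 handlers on purpose:
RFC 4757 reuses the RFC 1964-style token framing, unlike the AES enctypes,
which take the RFC 4121 v2 token path. For reference, a sketch of the enctype
numbers behind the case labels, which I believe match the kernel's defines:

    enum {
            ENCTYPE_DES_CBC_RAW             = 0x0004,
            ENCTYPE_DES3_CBC_RAW            = 0x0006,
            ENCTYPE_AES128_CTS_HMAC_SHA1_96 = 0x0011,
            ENCTYPE_AES256_CTS_HMAC_SHA1_96 = 0x0012,
            ENCTYPE_ARCFOUR_HMAC            = 0x0017,   /* RFC 4757 */
    };
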