[SUNRPC] GSS: Use block ciphers where applicable
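This change converts the SUNRPC GSS Kerberos 5 code from the old crypto_tfm cipher interface to the crypto_blkcipher API: the IV is now carried in a struct blkcipher_desc instead of being passed to every encrypt/decrypt call, open-coded scatterlist setup is replaced with sg_set_buf(), and a generic process_xdr_buf() walker plus gss_encrypt_xdr_buf()/gss_decrypt_xdr_buf() are added so an xdr_buf (head, page data and tail) can be checksummed or encrypted in place.

A minimal sketch of the new calling convention, not part of the patch itself; the algorithm name "cbc(des)", the key handling and the function name example_encrypt are illustrative assumptions only:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_encrypt(void *buf, unsigned int len,
			   const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg[1];
	u8 iv[8] = { 0 };
	int ret;

	/* allocate a synchronous block cipher transform */
	tfm = crypto_alloc_blkcipher("cbc(des)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	/* the IV travels in the blkcipher_desc, not as an extra argument */
	desc.tfm = tfm;
	desc.info = iv;
	desc.flags = 0;

	/* len is assumed to be a multiple of the cipher block size */
	sg_set_buf(sg, buf, len);
	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
out:
	crypto_free_blkcipher(tfm);
	return ret;
}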
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index ee6ae74cd1b226b2bb7d39655f2a1862e4507c21..57192dfe30656d33f5beb30e362e2402dd1e372d 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -37,7 +37,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <linux/crypto.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
@@ -49,7 +49,7 @@
 
 u32
 krb5_encrypt(
-       struct crypto_tfm *tfm,
+       struct crypto_blkcipher *tfm,
        void * iv,
        void * in,
        void * out,
@@ -58,28 +58,27 @@ krb5_encrypt(
        u32 ret = -EINVAL;
         struct scatterlist sg[1];
        u8 local_iv[16] = {0};
+       struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
        dprintk("RPC:      krb5_encrypt: input data:\n");
        print_hexl((u32 *)in, length, 0);
 
-       if (length % crypto_tfm_alg_blocksize(tfm) != 0)
+       if (length % crypto_blkcipher_blocksize(tfm) != 0)
                goto out;
 
-       if (crypto_tfm_alg_ivsize(tfm) > 16) {
+       if (crypto_blkcipher_ivsize(tfm) > 16) {
                dprintk("RPC:      gss_k5encrypt: tfm iv size to large %d\n",
-                        crypto_tfm_alg_ivsize(tfm));
+                        crypto_blkcipher_ivsize(tfm));
                goto out;
        }
 
        if (iv)
-               memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
+               memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
        memcpy(out, in, length);
-       sg[0].page = virt_to_page(out);
-       sg[0].offset = offset_in_page(out);
-       sg[0].length = length;
+       sg_set_buf(sg, out, length);
 
-       ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv);
+       ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
 
        dprintk("RPC:      krb5_encrypt: output data:\n");
        print_hexl((u32 *)out, length, 0);
@@ -92,7 +91,7 @@ EXPORT_SYMBOL(krb5_encrypt);
 
 u32
 krb5_decrypt(
-     struct crypto_tfm *tfm,
+     struct crypto_blkcipher *tfm,
      void * iv,
      void * in,
      void * out,
@@ -101,27 +100,26 @@ krb5_decrypt(
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[16] = {0};
+       struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
        dprintk("RPC:      krb5_decrypt: input data:\n");
        print_hexl((u32 *)in, length, 0);
 
-       if (length % crypto_tfm_alg_blocksize(tfm) != 0)
+       if (length % crypto_blkcipher_blocksize(tfm) != 0)
                goto out;
 
-       if (crypto_tfm_alg_ivsize(tfm) > 16) {
+       if (crypto_blkcipher_ivsize(tfm) > 16) {
                dprintk("RPC:      gss_k5decrypt: tfm iv size to large %d\n",
-                       crypto_tfm_alg_ivsize(tfm));
+                       crypto_blkcipher_ivsize(tfm));
                goto out;
        }
        if (iv)
-               memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm));
+               memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
 
        memcpy(out, in, length);
-       sg[0].page = virt_to_page(out);
-       sg[0].offset = offset_in_page(out);
-       sg[0].length = length;
+       sg_set_buf(sg, out, length);
 
-       ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv);
+       ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
 
        dprintk("RPC:      krb5_decrypt: output_data:\n");
        print_hexl((u32 *)out, length, 0);
@@ -132,24 +130,90 @@ out:
 
 EXPORT_SYMBOL(krb5_decrypt);
 
-static void
-buf_to_sg(struct scatterlist *sg, char *ptr, int len) {
-       sg->page = virt_to_page(ptr);
-       sg->offset = offset_in_page(ptr);
-       sg->length = len;
+static int
+process_xdr_buf(struct xdr_buf *buf, int offset, int len,
+               int (*actor)(struct scatterlist *, void *), void *data)
+{
+       int i, page_len, thislen, page_offset, ret = 0;
+       struct scatterlist      sg[1];
+
+       if (offset >= buf->head[0].iov_len) {
+               offset -= buf->head[0].iov_len;
+       } else {
+               thislen = buf->head[0].iov_len - offset;
+               if (thislen > len)
+                       thislen = len;
+               sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
+               ret = actor(sg, data);
+               if (ret)
+                       goto out;
+               offset = 0;
+               len -= thislen;
+       }
+       if (len == 0)
+               goto out;
+
+       if (offset >= buf->page_len) {
+               offset -= buf->page_len;
+       } else {
+               page_len = buf->page_len - offset;
+               if (page_len > len)
+                       page_len = len;
+               len -= page_len;
+               page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
+               i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
+               thislen = PAGE_CACHE_SIZE - page_offset;
+               do {
+                       if (thislen > page_len)
+                               thislen = page_len;
+                       sg->page = buf->pages[i];
+                       sg->offset = page_offset;
+                       sg->length = thislen;
+                       ret = actor(sg, data);
+                       if (ret)
+                               goto out;
+                       page_len -= thislen;
+                       i++;
+                       page_offset = 0;
+                       thislen = PAGE_CACHE_SIZE;
+               } while (page_len != 0);
+               offset = 0;
+       }
+       if (len == 0)
+               goto out;
+
+       if (offset < buf->tail[0].iov_len) {
+               thislen = buf->tail[0].iov_len - offset;
+               if (thislen > len)
+                       thislen = len;
+               sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
+               ret = actor(sg, data);
+               len -= thislen;
+       }
+       if (len != 0)
+               ret = -EINVAL;
+out:
+       return ret;
+}
+
+static int
+checksummer(struct scatterlist *sg, void *data)
+{
+       struct crypto_tfm *tfm = (struct crypto_tfm *)data;
+
+       crypto_digest_update(tfm, sg, 1);
+
+       return 0;
 }
 
 /* checksum the plaintext data and hdrlen bytes of the token header */
 s32
 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
-                  struct xdr_netobj *cksum)
+                  int body_offset, struct xdr_netobj *cksum)
 {
        char                            *cksumname;
        struct crypto_tfm               *tfm = NULL; /* XXX add to ctx? */
        struct scatterlist              sg[1];
-       u32                             code = GSS_S_FAILURE;
-       int                             len, thislen, offset;
-       int                             i;
 
        switch (cksumtype) {
                case CKSUMTYPE_RSA_MD5:
@@ -158,49 +222,176 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
                default:
                        dprintk("RPC:      krb5_make_checksum:"
                                " unsupported checksum %d", cksumtype);
-                       goto out;
+                       return GSS_S_FAILURE;
        }
        if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP)))
-               goto out;
+               return GSS_S_FAILURE;
        cksum->len = crypto_tfm_alg_digestsize(tfm);
-       if ((cksum->data = kmalloc(cksum->len, GFP_KERNEL)) == NULL)
-               goto out;
 
        crypto_digest_init(tfm);
-       buf_to_sg(sg, header, hdrlen);
+       sg_set_buf(sg, header, hdrlen);
        crypto_digest_update(tfm, sg, 1);
-       if (body->head[0].iov_len) {
-               buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len);
-               crypto_digest_update(tfm, sg, 1);
+       process_xdr_buf(body, body_offset, body->len - body_offset,
+                       checksummer, tfm);
+       crypto_digest_final(tfm, cksum->data);
+       crypto_free_tfm(tfm);
+       return 0;
+}
+
+EXPORT_SYMBOL(make_checksum);
+
+struct encryptor_desc {
+       u8 iv[8]; /* XXX hard-coded blocksize */
+       struct blkcipher_desc desc;
+       int pos;
+       struct xdr_buf *outbuf;
+       struct page **pages;
+       struct scatterlist infrags[4];
+       struct scatterlist outfrags[4];
+       int fragno;
+       int fraglen;
+};
+
+static int
+encryptor(struct scatterlist *sg, void *data)
+{
+       struct encryptor_desc *desc = data;
+       struct xdr_buf *outbuf = desc->outbuf;
+       struct page *in_page;
+       int thislen = desc->fraglen + sg->length;
+       int fraglen, ret;
+       int page_pos;
+
+       /* Worst case is 4 fragments: head, end of page 1, start
+        * of page 2, tail.  Anything more is a bug. */
+       BUG_ON(desc->fragno > 3);
+       desc->infrags[desc->fragno] = *sg;
+       desc->outfrags[desc->fragno] = *sg;
+
+       page_pos = desc->pos - outbuf->head[0].iov_len;
+       if (page_pos >= 0 && page_pos < outbuf->page_len) {
+               /* pages are not in place: */
+               int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
+               in_page = desc->pages[i];
+       } else {
+               in_page = sg->page;
        }
+       desc->infrags[desc->fragno].page = in_page;
+       desc->fragno++;
+       desc->fraglen += sg->length;
+       desc->pos += sg->length;
 
-       len = body->page_len;
-       if (len != 0) {
-               offset = body->page_base & (PAGE_CACHE_SIZE - 1);
-               i = body->page_base >> PAGE_CACHE_SHIFT;
-               thislen = PAGE_CACHE_SIZE - offset;
-               do {
-                       if (thislen > len)
-                               thislen = len;
-                       sg->page = body->pages[i];
-                       sg->offset = offset;
-                       sg->length = thislen;
-                       crypto_digest_update(tfm, sg, 1);
-                       len -= thislen;
-                       i++;
-                       offset = 0;
-                       thislen = PAGE_CACHE_SIZE;
-               } while(len != 0);
+       fraglen = thislen & 7; /* XXX hardcoded blocksize */
+       thislen -= fraglen;
+
+       if (thislen == 0)
+               return 0;
+
+       ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
+                                         desc->infrags, thislen);
+       if (ret)
+               return ret;
+       if (fraglen) {
+               desc->outfrags[0].page = sg->page;
+               desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
+               desc->outfrags[0].length = fraglen;
+               desc->infrags[0] = desc->outfrags[0];
+               desc->infrags[0].page = in_page;
+               desc->fragno = 1;
+               desc->fraglen = fraglen;
+       } else {
+               desc->fragno = 0;
+               desc->fraglen = 0;
        }
-       if (body->tail[0].iov_len) {
-               buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len);
-               crypto_digest_update(tfm, sg, 1);
+       return 0;
+}
+
+int
+gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+                   int offset, struct page **pages)
+{
+       int ret;
+       struct encryptor_desc desc;
+
+       BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
+
+       memset(desc.iv, 0, sizeof(desc.iv));
+       desc.desc.tfm = tfm;
+       desc.desc.info = desc.iv;
+       desc.desc.flags = 0;
+       desc.pos = offset;
+       desc.outbuf = buf;
+       desc.pages = pages;
+       desc.fragno = 0;
+       desc.fraglen = 0;
+
+       ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
+       return ret;
+}
+
+EXPORT_SYMBOL(gss_encrypt_xdr_buf);
+
+struct decryptor_desc {
+       u8 iv[8]; /* XXX hard-coded blocksize */
+       struct blkcipher_desc desc;
+       struct scatterlist frags[4];
+       int fragno;
+       int fraglen;
+};
+
+static int
+decryptor(struct scatterlist *sg, void *data)
+{
+       struct decryptor_desc *desc = data;
+       int thislen = desc->fraglen + sg->length;
+       int fraglen, ret;
+
+       /* Worst case is 4 fragments: head, end of page 1, start
+        * of page 2, tail.  Anything more is a bug. */
+       BUG_ON(desc->fragno > 3);
+       desc->frags[desc->fragno] = *sg;
+       desc->fragno++;
+       desc->fraglen += sg->length;
+
+       fraglen = thislen & 7; /* XXX hardcoded blocksize */
+       thislen -= fraglen;
+
+       if (thislen == 0)
+               return 0;
+
+       ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
+                                         desc->frags, thislen);
+       if (ret)
+               return ret;
+       if (fraglen) {
+               desc->frags[0].page = sg->page;
+               desc->frags[0].offset = sg->offset + sg->length - fraglen;
+               desc->frags[0].length = fraglen;
+               desc->fragno = 1;
+               desc->fraglen = fraglen;
+       } else {
+               desc->fragno = 0;
+               desc->fraglen = 0;
        }
-       crypto_digest_final(tfm, cksum->data);
-       code = 0;
-out:
-       crypto_free_tfm(tfm);
-       return code;
+       return 0;
 }
 
-EXPORT_SYMBOL(make_checksum);
+int
+gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+                   int offset)
+{
+       struct decryptor_desc desc;
+
+       /* XXXJBF: */
+       BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
+
+       memset(desc.iv, 0, sizeof(desc.iv));
+       desc.desc.tfm = tfm;
+       desc.desc.info = desc.iv;
+       desc.desc.flags = 0;
+       desc.fragno = 0;
+       desc.fraglen = 0;
+       return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
+}
+
+EXPORT_SYMBOL(gss_decrypt_xdr_buf);
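The process_xdr_buf() helper added above walks the head, page and tail sections of an xdr_buf and hands each contiguous piece to an actor callback as a single-entry scatterlist; make_checksum(), gss_encrypt_xdr_buf() and gss_decrypt_xdr_buf() all build on it. A hedged usage sketch, assuming it lives in this same file (process_xdr_buf is static); count_bytes and count_xdr_bytes are illustrative names only:

static int count_bytes(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	/* a non-zero return here would abort the walk early */
	*total += sg->length;
	return 0;
}

static int count_xdr_bytes(struct xdr_buf *buf, int offset, unsigned int *total)
{
	*total = 0;
	return process_xdr_buf(buf, offset, buf->len - offset,
			       count_bytes, total);
}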