From: Linus Torvalds
Date: Sun, 18 Dec 2016 00:00:34 +0000 (-0800)
Subject: Merge tag 'docs-4.10-2' of git://git.lwn.net/linux
X-Git-Tag: v4.10-rc1~65
X-Git-Url: https://repo.jachan.dev/linux.git/commitdiff_plain/0aaf2146ecf00f7932f472ec5aa30d999c89530c?hp=-c

Merge tag 'docs-4.10-2' of git://git.lwn.net/linux

Pull more documentation updates from Jonathan Corbet:
 "This converts the crypto DocBook to Sphinx"

* tag 'docs-4.10-2' of git://git.lwn.net/linux:
  crypto: doc - optimize compilation
  crypto: doc - clarify AEAD memory structure
  crypto: doc - remove crypto_alloc_ablkcipher
  crypto: doc - add KPP documentation
  crypto: doc - fix separation of cipher / req API
  crypto: doc - fix source comments for Sphinx
  crypto: doc - remove crypto API DocBook
  crypto: doc - convert crypto API documentation to Sphinx
---

0aaf2146ecf00f7932f472ec5aa30d999c89530c
diff --combined crypto/algif_aead.c
index 668ef402c6eb,a0d8377729a4..f849311e9fd4
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@@ -81,11 -81,7 +81,11 @@@ static inline bool aead_sufficient_data
  {
  	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
  
 -	return ctx->used >= ctx->aead_assoclen + as;
 +	/*
 +	 * The minimum amount of memory needed for an AEAD cipher is
 +	 * the AAD and in case of decryption the tag.
 +	 */
 +	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
  }
  
  static void aead_reset_ctx(struct aead_ctx *ctx)
@@@ -136,27 -132,28 +136,27 @@@ static void aead_wmem_wakeup(struct soc
  
  static int aead_wait_for_data(struct sock *sk, unsigned flags)
  {
 +	DEFINE_WAIT_FUNC(wait, woken_wake_function);
  	struct alg_sock *ask = alg_sk(sk);
  	struct aead_ctx *ctx = ask->private;
  	long timeout;
 -	DEFINE_WAIT(wait);
  	int err = -ERESTARTSYS;
  
  	if (flags & MSG_DONTWAIT)
  		return -EAGAIN;
  
  	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 -
 +	add_wait_queue(sk_sleep(sk), &wait);
  	for (;;) {
  		if (signal_pending(current))
  			break;
 -		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
  		timeout = MAX_SCHEDULE_TIMEOUT;
 -		if (sk_wait_event(sk, &timeout, !ctx->more)) {
 +		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
  			err = 0;
  			break;
  		}
  	}
 -	finish_wait(sk_sleep(sk), &wait);
 +	remove_wait_queue(sk_sleep(sk), &wait);
  
  	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@@ -419,7 -416,7 +419,7 @@@ static int aead_recvmsg_async(struct so
  	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
  	int err = -ENOMEM;
  	unsigned long used;
 -	size_t outlen;
 +	size_t outlen = 0;
  	size_t usedpages = 0;
  
  	lock_sock(sk);
@@@ -429,15 -426,12 +429,15 @@@
  			goto unlock;
  	}
  
 -	used = ctx->used;
 -	outlen = used;
 -
  	if (!aead_sufficient_data(ctx))
  		goto unlock;
  
 +	used = ctx->used;
 +	if (ctx->enc)
 +		outlen = used + as;
 +	else
 +		outlen = used - as;
 +
  	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
  	if (unlikely(!req))
  		goto unlock;
@@@ -451,16 -445,15 +451,16 @@@
  	aead_request_set_ad(req, ctx->aead_assoclen);
  	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
  				  aead_async_cb, sk);
 -	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
 +	used -= ctx->aead_assoclen;
  
  	/* take over all tx sgls from ctx */
 -	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
 +	areq->tsgl = sock_kmalloc(sk,
 +				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
  				  GFP_KERNEL);
  	if (unlikely(!areq->tsgl))
  		goto free;
  
 -	sg_init_table(areq->tsgl, sgl->cur);
 +	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
  	for (i = 0; i < sgl->cur; i++)
  		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
  			    sgl->sg[i].length, sgl->sg[i].offset);
@@@ -468,7 -461,7 +468,7 @@@
  	areq->tsgls = sgl->cur;
  
  	/* create rx sgls */
 -	while (iov_iter_count(&msg->msg_iter)) {
 +	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
  		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
  				      (outlen - usedpages));
@@@ -498,14 -491,16 +498,14 @@@
  		last_rsgl = rsgl;
  
 -		/* we do not need more iovecs as we have sufficient memory */
 -		if (outlen <= usedpages)
 -			break;
 -
  		iov_iter_advance(&msg->msg_iter, err);
  	}
 -	err = -EINVAL;
 +
  	/* ensure output buffer is sufficiently large */
 -	if (usedpages < outlen)
 -		goto free;
 +	if (usedpages < outlen) {
 +		err = -EINVAL;
 +		goto unlock;
 +	}
  
  	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
  			       areq->iv);
@@@ -556,18 -551,8 +556,8 @@@ static int aead_recvmsg_sync(struct soc
  	lock_sock(sk);
  
  	/*
- 	 * AEAD memory structure: For encryption, the tag is appended to the
- 	 * ciphertext which implies that the memory allocated for the ciphertext
- 	 * must be increased by the tag length. For decryption, the tag
- 	 * is expected to be concatenated to the ciphertext. The plaintext
- 	 * therefore has a memory size of the ciphertext minus the tag length.
- 	 *
- 	 * The memory structure for cipher operation has the following
- 	 * structure:
- 	 * AEAD encryption input:  assoc data || plaintext
- 	 * AEAD encryption output: cipherntext || auth tag
- 	 * AEAD decryption input:  assoc data || ciphertext || auth tag
- 	 * AEAD decryption output: plaintext
+ 	 * Please see documentation of aead_request_set_crypt for the
+ 	 * description of the AEAD memory structure expected from the caller.
  	 */
  
  	if (ctx->more) {
@@@ -576,7 -561,6 +566,7 @@@
  			goto unlock;
  	}
  
 +	/* data length provided by caller via sendmsg/sendpage */
  	used = ctx->used;
  
  	/*
@@@ -591,27 -575,16 +581,27 @@@
  	if (!aead_sufficient_data(ctx))
  		goto unlock;
  
 -	outlen = used;
 +	/*
 +	 * Calculate the minimum output buffer size holding the result of the
 +	 * cipher operation. When encrypting data, the receiving buffer is
 +	 * larger by the tag length compared to the input buffer as the
 +	 * encryption operation generates the tag. For decryption, the input
 +	 * buffer provides the tag which is consumed resulting in only the
 +	 * plaintext without a buffer for the tag returned to the caller.
 +	 */
 +	if (ctx->enc)
 +		outlen = used + as;
 +	else
 +		outlen = used - as;
  
  	/*
  	 * The cipher operation input data is reduced by the associated data
  	 * length as this data is processed separately later on.
  	 */
 -	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
 +	used -= ctx->aead_assoclen;
  
  	/* convert iovecs of output buffers into scatterlists */
 -	while (iov_iter_count(&msg->msg_iter)) {
 +	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
  		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
  				      (outlen - usedpages));
@@@ -638,14 -611,16 +628,14 @@@
  		last_rsgl = rsgl;
  
 -		/* we do not need more iovecs as we have sufficient memory */
 -		if (outlen <= usedpages)
 -			break;
  		iov_iter_advance(&msg->msg_iter, err);
  	}
  
 -	err = -EINVAL;
  	/* ensure output buffer is sufficiently large */
 -	if (usedpages < outlen)
 +	if (usedpages < outlen) {
 +		err = -EINVAL;
  		goto unlock;
 +	}
  
  	sg_mark_end(sgl->sg + sgl->cur - 1);
  	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
diff --combined include/linux/crypto.h
index 167aea29d41e,faf8127234e1..c0b0cf3d2d2f
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@@ -50,8 -50,6 +50,8 @@@
  #define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
  #define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
  #define CRYPTO_ALG_TYPE_KPP		0x00000008
 +#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
 +#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
  #define CRYPTO_ALG_TYPE_RNG		0x0000000c
  #define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
  #define CRYPTO_ALG_TYPE_DIGEST		0x0000000e
@@@ -62,7 -60,6 +62,7 @@@
  #define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
  #define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
  #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e
  
  #define CRYPTO_ALG_LARVAL		0x00000010
  #define CRYPTO_ALG_DEAD			0x00000020
@@@ -90,7 -87,7 +90,7 @@@
  #define CRYPTO_ALG_TESTED		0x00000400
  
  /*
-  * Set if the algorithm is an instance that is build from templates.
+  * Set if the algorithm is an instance that is built from templates.
   */
  #define CRYPTO_ALG_INSTANCE		0x00000800
  
@@@ -963,7 -960,7 +963,7 @@@ static inline void ablkcipher_request_f
   * ablkcipher_request_set_callback() - set asynchronous callback function
   * @req: request handle
   * @flags: specify zero or an ORing of the flags
-  *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+  *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
   *	   increase the wait queue beyond the initial maximum size;
   *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
   * @compl: callback function pointer to be registered with the request handle
@@@ -980,7 -977,7 +980,7 @@@
   * cipher operation completes.
   *
   * The callback function is registered with the ablkcipher_request handle and
-  * must comply with the following template
+  * must comply with the following template::
   *
   * void callback_function(struct crypto_async_request *req, int error)
   */
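
Editor's note: the sketch below is illustrative only and is not part of the merge
above. It restates the buffer-size arithmetic that the new comments in
crypto/algif_aead.c describe, as a standalone user-space C program. The helper
names (sufficient_data, min_outlen) and the sample lengths are hypothetical and
exist only for this example; the kernel code operates on struct aead_ctx fields
rather than plain integers.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Minimum amount of queued input before a request may run: the AAD, plus
 * the authentication tag when decrypting, since the tag is then part of
 * the input (mirrors the reworked aead_sufficient_data()).
 */
static bool sufficient_data(size_t used, size_t assoclen,
			    size_t authsize, bool enc)
{
	return used >= assoclen + (enc ? 0 : authsize);
}

/*
 * Minimum output buffer for the result: encryption appends the generated
 * tag, so the output is larger than the input by the tag length;
 * decryption consumes the tag and returns only the plaintext.
 */
static size_t min_outlen(size_t used, size_t authsize, bool enc)
{
	return enc ? used + authsize : used - authsize;
}

int main(void)
{
	size_t assoclen = 16, authsize = 16;

	/* Decryption: the queued input is AAD || ciphertext || tag. */
	size_t used = assoclen + 64 + authsize;

	if (sufficient_data(used, assoclen, authsize, false))
		printf("decrypt: outlen >= %zu, cryptlen = %zu\n",
		       min_outlen(used, authsize, false),
		       used - assoclen);	/* AAD handled separately */

	/* Encryption: the queued input is AAD || plaintext. */
	used = assoclen + 64;
	if (sufficient_data(used, assoclen, authsize, true))
		printf("encrypt: outlen >= %zu, cryptlen = %zu\n",
		       min_outlen(used, authsize, true),
		       used - assoclen);
	return 0;
}

The "used - assoclen" term corresponds to the patch's "used -= ctx->aead_assoclen"
step: the associated data is processed separately, so it is not counted in the
length handed to the cipher operation itself.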