Git Repo - linux.git/commitdiff
Merge tag 'nfs-for-4.20-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
author Linus Torvalds <[email protected]>
Fri, 26 Oct 2018 20:05:26 +0000 (13:05 -0700)
committer Linus Torvalds <[email protected]>
Fri, 26 Oct 2018 20:05:26 +0000 (13:05 -0700)
Pull NFS client updates from Trond Myklebust:
 "Highlights include:

  Stable fixes:
   - Fix the NFSv4.1 r/wsize sanity checking
   - Reset the RPC/RDMA credit grant properly after a disconnect
   - Fix a missed page unlock after pg_doio()

  Features and optimisations:
   - Overhaul of the RPC client socket code to eliminate a locking
     bottleneck and reduce the latency when transmitting lots of
     requests in parallel.
   - Allow parallelisation of the RPCSEC_GSS encoding of an RPC request.
   - Convert the RPC client socket receive code to use iov_iter for
     improved efficiency (sketched below).
   - Convert several NFS and RPC lookup operations to use RCU instead of
     taking global locks.
   - Avoid the need for BH-safe locks in the RPC/RDMA back channel.

  Bugfixes and cleanups:
   - Fix lock recovery during NFSv4 delegation recalls
   - Fix the NFSv4 + NFSv4.1 "lookup revalidate + open file" case.
   - Fixes for the RPC connection metrics
   - Various RPC client layer cleanups to consolidate stream based
     sockets
   - RPC/RDMA connection cleanups
   - Simplify the RPC/RDMA cleanup after memory operation failures
   - Cleanups for NFSv4.2 copy completion and NFSv4 open state
     reclaim"

* tag 'nfs-for-4.20-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (97 commits)
  SUNRPC: Convert the auth cred cache to use refcount_t
  SUNRPC: Convert auth creds to use refcount_t
  SUNRPC: Simplify lookup code
  SUNRPC: Clean up the AUTH cache code
  NFS: change sign of nfs_fh length
  sunrpc: safely reallow resvport min/max inversion
  nfs: remove redundant call to nfs_context_set_write_error()
  nfs: Fix a missed page unlock after pg_doio()
  SUNRPC: Fix a compile warning for cmpxchg64()
  NFSv4.x: fix lock recovery during delegation recall
  SUNRPC: use cmpxchg64() in gss_seq_send64_fetch_and_inc()
  xprtrdma: Squelch a sparse warning
  xprtrdma: Clean up xprt_rdma_disconnect_inject
  xprtrdma: Add documenting comments
  xprtrdma: Report when there were zero posted Receives
  xprtrdma: Move rb_flags initialization
  xprtrdma: Don't disable BH's in backchannel server
  xprtrdma: Remove memory address of "ep" from an error message
  xprtrdma: Rename rpcrdma_qp_async_error_upcall
  xprtrdma: Simplify RPC wake-ups on connect
  ...
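
The refcount_t conversions at the top of the shortlog follow the usual idiom of replacing an open-coded atomic_t reference count with the overflow-checked refcount API. A generic sketch of that idiom, using a stand-in struct rather than the real SUNRPC cred structures:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_cred {			/* stand-in for the real cred struct */
	refcount_t count;
};

static struct demo_cred *demo_cred_get(struct demo_cred *cred)
{
	refcount_inc(&cred->count);	/* saturates instead of wrapping */
	return cred;
}

static void demo_cred_put(struct demo_cred *cred)
{
	if (refcount_dec_and_test(&cred->count))
		kfree(cred);
}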

include/linux/sunrpc/gss_krb5.h
net/sunrpc/auth_gss/gss_krb5_wrap.c

index f6e8ceafafd8f1aa9b39fac833077de45d5afdca,69f749afa617b846f0ed064292f2d5c6489c0698..131424cefc6a92381036c099acb3a9833c846506
@@@ -71,10 -71,10 +71,10 @@@ struct gss_krb5_enctype 
        const u32               keyed_cksum;    /* is it a keyed cksum? */
        const u32               keybytes;       /* raw key len, in bytes */
        const u32               keylength;      /* final key len, in bytes */
 -      u32 (*encrypt) (struct crypto_skcipher *tfm,
 +      u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
                        void *iv, void *in, void *out,
                        int length);            /* encryption function */
 -      u32 (*decrypt) (struct crypto_skcipher *tfm,
 +      u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
                        void *iv, void *in, void *out,
                        int length);            /* decryption function */
        u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@@ -98,12 -98,12 +98,12 @@@ struct krb5_ctx 
        u32                     enctype;
        u32                     flags;
        const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
 -      struct crypto_skcipher  *enc;
 -      struct crypto_skcipher  *seq;
 -      struct crypto_skcipher *acceptor_enc;
 -      struct crypto_skcipher *initiator_enc;
 -      struct crypto_skcipher *acceptor_enc_aux;
 -      struct crypto_skcipher *initiator_enc_aux;
 +      struct crypto_sync_skcipher *enc;
 +      struct crypto_sync_skcipher *seq;
 +      struct crypto_sync_skcipher *acceptor_enc;
 +      struct crypto_sync_skcipher *initiator_enc;
 +      struct crypto_sync_skcipher *acceptor_enc_aux;
 +      struct crypto_sync_skcipher *initiator_enc_aux;
        u8                      Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
        u8                      cksum[GSS_KRB5_MAX_KEYLEN];
        s32                     endtime;
        u8                      acceptor_integ[GSS_KRB5_MAX_KEYLEN];
  };
  
- extern spinlock_t krb5_seq_lock;
+ extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
+ extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
  
  /* The length of the Kerberos GSS token header */
  #define GSS_KRB5_TOK_HDR_LEN  (16)
@@@ -262,24 -263,24 +263,24 @@@ gss_unwrap_kerberos(struct gss_ctx *ctx
  
  
  u32
 -krb5_encrypt(struct crypto_skcipher *key,
 +krb5_encrypt(struct crypto_sync_skcipher *key,
             void *iv, void *in, void *out, int length);
  
  u32
 -krb5_decrypt(struct crypto_skcipher *key,
 +krb5_decrypt(struct crypto_sync_skcipher *key,
             void *iv, void *in, void *out, int length); 
  
  int
 -gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf,
 +gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
                    int offset, struct page **pages);
  
  int
 -gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf,
 +gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
                    int offset);
  
  s32
  krb5_make_seq_num(struct krb5_ctx *kctx,
 -              struct crypto_skcipher *key,
 +              struct crypto_sync_skcipher *key,
                int direction,
                u32 seqnum, unsigned char *cksum, unsigned char *buf);
  
@@@ -320,12 -321,12 +321,12 @@@ gss_krb5_aes_decrypt(struct krb5_ctx *k
  
  int
  krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
 -                     struct crypto_skcipher *cipher,
 +                     struct crypto_sync_skcipher *cipher,
                       unsigned char *cksum);
  
  int
  krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
 -                     struct crypto_skcipher *cipher,
 +                     struct crypto_sync_skcipher *cipher,
                       s32 seqnum);
  void
  gss_krb5_make_confounder(char *p, u32 conflen);
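
The hunks above replace the global krb5_seq_lock with per-context fetch-and-increment helpers. Together with the "use cmpxchg64()" commit in the shortlog, this suggests a lock-free implementation along the following lines; the struct and function names here are stand-ins, and the real helper may differ in detail:

#include <linux/atomic.h>	/* cmpxchg64() */
#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/types.h>

struct demo_ctx {
	u64 seq_send64;		/* stand-in for krb5_ctx->seq_send64 */
};

/* Atomically claim the current 64-bit send sequence number and bump it,
 * retrying the cmpxchg64() until no other CPU raced with us. */
static u64 demo_seq_send64_fetch_and_inc(struct demo_ctx *ctx)
{
	u64 old, prev = READ_ONCE(ctx->seq_send64);

	do {
		old = prev;
		prev = cmpxchg64(&ctx->seq_send64, old, old + 1);
	} while (prev != old);

	return old;
}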
index 3d975a4013d2ffd0ee4a9ec60a74122989f3cd06,41cb294cd07179306edbbe5f8e6037c46b26bfbe..962fa84e6db114f95790f8d6bba485fe226ed43e
@@@ -174,7 -174,7 +174,7 @@@ gss_wrap_kerberos_v1(struct krb5_ctx *k
  
        now = get_seconds();
  
 -      blocksize = crypto_skcipher_blocksize(kctx->enc);
 +      blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = conflen + buf->len - offset;
  
        memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
  
-       spin_lock(&krb5_seq_lock);
-       seq_send = kctx->seq_send++;
-       spin_unlock(&krb5_seq_lock);
+       seq_send = gss_seq_send_fetch_and_inc(kctx);
  
        /* XXX would probably be more efficient to compute checksum
         * and encrypt at the same time: */
                return GSS_S_FAILURE;
  
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
 -              struct crypto_skcipher *cipher;
 +              struct crypto_sync_skcipher *cipher;
                int err;
 -              cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
 -                                             CRYPTO_ALG_ASYNC);
 +              cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
 +                                                  0, 0);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;
  
  
                err = gss_encrypt_xdr_buf(cipher, buf,
                                          offset + headlen - conflen, pages);
 -              crypto_free_skcipher(cipher);
 +              crypto_free_sync_skcipher(cipher);
                if (err)
                        return GSS_S_FAILURE;
        } else {
@@@ -327,18 -325,18 +325,18 @@@ gss_unwrap_kerberos_v1(struct krb5_ctx 
                return GSS_S_BAD_SIG;
  
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
 -              struct crypto_skcipher *cipher;
 +              struct crypto_sync_skcipher *cipher;
                int err;
  
 -              cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
 -                                             CRYPTO_ALG_ASYNC);
 +              cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
 +                                                  0, 0);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;
  
                krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
  
                err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
 -              crypto_free_skcipher(cipher);
 +              crypto_free_sync_skcipher(cipher);
                if (err)
                        return GSS_S_DEFECTIVE_TOKEN;
        } else {
        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */
  
 -      blocksize = crypto_skcipher_blocksize(kctx->enc);
 +      blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
                                        conflen;
        orig_start = buf->head[0].iov_base + offset;
@@@ -477,9 -475,7 +475,7 @@@ gss_wrap_kerberos_v2(struct krb5_ctx *k
        *be16ptr++ = 0;
  
        be64ptr = (__be64 *)be16ptr;
-       spin_lock(&krb5_seq_lock);
-       *be64ptr = cpu_to_be64(kctx->seq_send64++);
-       spin_unlock(&krb5_seq_lock);
+       *be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx));
  
        err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
        if (err)
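
The crypto_skcipher to crypto_sync_skcipher conversion throughout this diff moves these always-synchronous users onto the sync-only wrapper API, which permits on-stack requests. A hedged sketch of the basic usage pattern; the algorithm name and helper function are illustrative, not taken from this code:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: encrypt @len bytes described by @sg in place. */
static int demo_sync_encrypt(const u8 *key, unsigned int keylen,
			     u8 *iv, struct scatterlist *sg,
			     unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		/* The request can live on the stack because the sync
		 * wrapper guarantees no asynchronous completion. */
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}
	crypto_free_sync_skcipher(tfm);
	return err;
}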