static bool nfsd4_deleg_present(const struct inode *inode)
{
- struct file_lock_context *ctx = smp_load_acquire(&inode->i_flctx);
+ struct file_lock_context *ctx = locks_inode_context(inode);
return ctx && !list_empty_careful(&ctx->flc_lease);
}
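The open-coded smp_load_acquire() on i_flctx is replaced by the locks_inode_context() helper. As a sketch (the authoritative definition lives with the file locking code, not in this patch), the helper wraps the same acquire load, so every reader of i_flctx keeps pairing with the release store that publishes the context:

/* Sketch of the helper this hunk switches to. */
static inline struct file_lock_context *
locks_inode_context(const struct inode *inode)
{
	/* Pairs with the release store that first attaches i_flctx. */
	return smp_load_acquire(&inode->i_flctx);
}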
if (!fp->fi_fds[oflag]) {
spin_unlock(&fp->fi_lock);
- if (!open->op_filp) {
- status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
- if (status != nfs_ok)
- goto out_put_access;
- } else {
- status = nfsd_file_create(rqstp, cur_fh, access, &nf);
- if (status != nfs_ok)
- goto out_put_access;
- nf->nf_file = open->op_filp;
- open->op_filp = NULL;
- trace_nfsd_file_create(rqstp, access, nf);
- }
+ status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
+ open->op_filp, &nf);
+ if (status != nfs_ok)
+ goto out_put_access;
spin_lock(&fp->fi_lock);
if (!fp->fi_fds[oflag]) {
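The two-branch path above (nfsd_file_acquire() for a cache hit, nfsd_file_create() plus a hand-off of open->op_filp otherwise) collapses into one nfsd_file_acquire_opened() call that is given the already-opened struct file. A hedged sketch of the calling convention this hunk assumes, not copied from the patch itself:

/* Assumed shape: like nfsd_file_acquire(), except that when "file" is
 * non-NULL the caller has already opened it (here open->op_filp) and
 * the file cache installs that file instead of opening its own. */
__be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
				unsigned int may_flags, struct file *file,
				struct nfsd_file **pnf);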
list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
nf = stp->st_stid.sc_file;
- ctx = nf->fi_inode->i_flctx;
+ ctx = locks_inode_context(nf->fi_inode);
if (!ctx)
continue;
if (locks_owner_has_blockers(ctx, lo))
}
inode = locks_inode(nf->nf_file);
- flctx = inode->i_flctx;
+ flctx = locks_inode_context(inode);
if (flctx && !list_empty_careful(&flctx->flc_posix)) {
spin_lock(&flctx->flc_lock);
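This hunk and the lease check earlier follow the same pattern: a lockless list_empty_careful() probe on the relevant flc_* list first, and only if the list might be non-empty is flc_lock taken for the real walk. A minimal sketch of that shape, where check_conflict() is a made-up placeholder for the per-lock work:

/* Illustration only; check_conflict() is hypothetical. */
static bool any_posix_conflict(struct inode *inode)
{
	struct file_lock_context *ctx = locks_inode_context(inode);
	struct file_lock *fl;
	bool found = false;

	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return false;	/* cheap lockless fast path */

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (check_conflict(fl)) {
			found = true;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return found;
}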
struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
- del_timer_sync(&serv->sv_temptimer);
+ timer_shutdown_sync(&serv->sv_temptimer);
/*
* The last user is gone and thus all sockets have to be destroyed to
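del_timer_sync() only guarantees the handler is not running when it returns; the timer could still be re-armed afterwards. timer_shutdown_sync() additionally marks the timer as shut down so later re-arm attempts are ignored, which suits this teardown path where sv_temptimer must never fire again. A rough sketch of the ordering being relied on, with everything except the timer API invented for illustration:

/* Illustrative only: my_obj and my_obj_destroy() are made up. */
struct my_obj {
	struct timer_list timer;
};

static void my_obj_destroy(struct my_obj *obj)
{
	/*
	 * After this returns the callback has finished and further
	 * mod_timer()/add_timer() on obj->timer is ignored, so freeing
	 * obj is safe even if another path still tries to re-arm it.
	 */
	timer_shutdown_sync(&obj->timer);
	kfree(obj);
}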
goto err_short_len;
/* Will be turned off by GSS integrity and privacy services */
- __set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+ set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
/* Will be turned off only when NFSv4 Sessions are used */
- __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
- __clear_bit(RQ_DROPME, &rqstp->rq_flags);
+ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+ clear_bit(RQ_DROPME, &rqstp->rq_flags);
svc_putu32(resv, rqstp->rq_xid);
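The double-underscore helpers are the non-atomic bit operations; they are only safe when nothing else can touch the same word concurrently. Other bits of rq_flags (RQ_BUSY, RQ_VICTIM, ...) are updated from other contexts, so a plain read-modify-write here can lose one of those updates; hence the move to the atomic variants. The distinction in two lines:

/* Non-atomic: plain load/or/store; a racing update to another bit
 * in the same rq_flags word can be lost. */
__set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);

/* Atomic: locked read-modify-write; safe against concurrent
 * set_bit()/clear_bit() on other bits of the word. */
set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);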
rqstp->rq_respages = &rqstp->rq_pages[i];
rqstp->rq_next_page = rqstp->rq_respages + 1;
- iov_iter_bvec(&msg.msg_iter, READ, bvec, i, buflen);
+ iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen);
if (seek) {
iov_iter_advance(&msg.msg_iter, seek);
buflen -= seek;
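The READ/WRITE direction arguments to the iov_iter constructors were renamed to the more explicit ITER_DEST and ITER_SOURCE: ITER_DEST means the iterator's buffers are the destination of the data (a receive, as in this hunk), ITER_SOURCE means they are the source (a send). The values match READ and WRITE, so behaviour is unchanged; only the name is clearer:

/* Receiving into bvec pages: the pages are the destination. */
iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen);

/* Sending from a kvec would use the source direction instead. */
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len);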
static void svc_sock_secure_port(struct svc_rqst *rqstp)
{
if (svc_port_is_privileged(svc_addr(rqstp)))
- __set_bit(RQ_SECURE, &rqstp->rq_flags);
+ set_bit(RQ_SECURE, &rqstp->rq_flags);
else
- __clear_bit(RQ_SECURE, &rqstp->rq_flags);
+ clear_bit(RQ_SECURE, &rqstp->rq_flags);
}
/*
want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
iov.iov_len = want;
- iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, want);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
if (len < 0)
return len;
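For context, this loop assembles the 4-byte RPC-over-TCP record marker piecemeal: sk_tcplen counts how much of the marker has already arrived, so "want" is the remainder and the kvec points just past the bytes buffered in sk_marker. Once complete, the fragment length and last-fragment bit are pulled out of the marker roughly as below (a sketch; svcsock's own svc_sock_reclen()/svc_sock_final_rec() helpers do the real work):

/* Sketch of interpreting the completed record marker. */
u32 marker = be32_to_cpu(svsk->sk_marker);
u32 frag_len = marker & RPC_FRAGMENT_SIZE_MASK;	/* bytes in this fragment */
bool last    = marker & RPC_LAST_STREAM_FRAGMENT;	/* high bit: final fragment */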
rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
- __set_bit(RQ_LOCAL, &rqstp->rq_flags);
+ set_bit(RQ_LOCAL, &rqstp->rq_flags);
else
- __clear_bit(RQ_LOCAL, &rqstp->rq_flags);
+ clear_bit(RQ_LOCAL, &rqstp->rq_flags);
p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
calldir = p[1];