Merge tag 'block-5.8-2020-06-11' of git://git.kernel.dk/linux-block
author    Linus Torvalds <[email protected]>
          Thu, 11 Jun 2020 23:07:33 +0000 (16:07 -0700)
committer Linus Torvalds <[email protected]>
          Thu, 11 Jun 2020 23:07:33 +0000 (16:07 -0700)
Pull block fixes from Jens Axboe:
 "Some followup fixes for this merge window. In particular:

   - Seqcount write missing preemption disable for stats (Ahmed)

   - blktrace fixes (Chaitanya)

   - Redundant initializations (Colin)

   - Various small NVMe fixes (Chaitanya, Christoph, Daniel, Max,
     Niklas, Rikard)

   - loop flag bug regression fix (Martijn)

   - blk-mq tagging fixes (Christoph, Ming)"

* tag 'block-5.8-2020-06-11' of git://git.kernel.dk/linux-block:
  umem: remove redundant initialization of variable ret
  pktcdvd: remove redundant initialization of variable ret
  nvmet: fail outstanding host posted AEN req
  nvme-pci: use simple suspend when a HMB is enabled
  nvme-fc: don't call nvme_cleanup_cmd() for AENs
  nvmet-tcp: constify nvmet_tcp_ops
  nvme-tcp: constify nvme_tcp_mq_ops and nvme_tcp_admin_mq_ops
  nvme: do not call del_gendisk() on a disk that was never added
  blk-mq: fix blk_mq_all_tag_iter
  blk-mq: split out a __blk_mq_get_driver_tag helper
  blktrace: fix endianness for blk_log_remap()
  blktrace: fix endianness in get_pdu_int()
  blktrace: use errno instead of bi_status
  block: nr_sects_write(): Disable preemption on seqcount write
  block: remove the error argument to the block_bio_complete tracepoint
  loop: Fix wrong masking of status flags
  block/bio-integrity: don't free 'buf' if bio_integrity_add_page() failed

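Note on the "block: nr_sects_write(): Disable preemption on seqcount write" entry above: a seqcount writer section must not be preempted, otherwise a reader spinning on an odd sequence value can livelock. The sketch below shows only that general writer-side pattern; the struct, field names and update function are hypothetical stand-ins, not the actual block-layer code.

/* Hedged sketch: a hypothetical counter protected by a seqcount_t.
 * Preemption is disabled across the write section so a concurrent
 * reader in read_seqcount_retry() cannot spin forever.
 */
#include <linux/preempt.h>
#include <linux/seqlock.h>
#include <linux/types.h>

struct example_dev {
        seqcount_t      seq;
        sector_t        nr_sects;
};

static void example_nr_sects_write(struct example_dev *dev, sector_t size)
{
        preempt_disable();
        write_seqcount_begin(&dev->seq);
        dev->nr_sects = size;
        write_seqcount_end(&dev->seq);
        preempt_enable();
}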
drivers/block/loop.c
drivers/nvme/host/tcp.c
drivers/nvme/target/tcp.c

diff --combined drivers/block/loop.c
index 2e96d8b8758b63c7869df1e7403974bc824124f2,ad63e42478683a04e33fd0730a20a758a57c63d6..c33bbbfd1bd9ce8ce5eeb7d66079fc1dc5203285
@@@ -644,8 -644,8 +644,8 @@@ static int do_req_filebacked(struct loo
  
  static inline void loop_update_dio(struct loop_device *lo)
  {
 -      __loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
 -                      lo->use_dio);
 +      __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
 +                              lo->use_dio);
  }
  
  static void loop_reread_partitions(struct loop_device *lo,
@@@ -1149,7 -1149,7 +1149,7 @@@ static int loop_configure(struct loop_d
  
        if (config->block_size)
                bsize = config->block_size;
 -      else if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev)
 +      else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
                /* In case of direct I/O, match underlying block size */
                bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
        else
@@@ -1390,7 -1390,7 +1390,7 @@@ loop_set_status(struct loop_device *lo
                goto out_unfreeze;
  
        /* Mask out flags that can't be set using LOOP_SET_STATUS. */
-       lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS;
+       lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
        /* For those flags, use the previous values instead */
        lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
        /* For flags that can't be cleared, use previous values too */
diff --combined drivers/nvme/host/tcp.c
index 1843110ec34f8a55349a20a98e84d259facf8952,7979fcf0c15f8e71b54bc8838f38ab4a691b2ed7..3345ec7efaff45de9d8ee10bc3bc0c0b1abb9e52
@@@ -131,8 -131,8 +131,8 @@@ struct nvme_tcp_ctrl 
  static LIST_HEAD(nvme_tcp_ctrl_list);
  static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
  static struct workqueue_struct *nvme_tcp_wq;
- static struct blk_mq_ops nvme_tcp_mq_ops;
- static struct blk_mq_ops nvme_tcp_admin_mq_ops;
+ static const struct blk_mq_ops nvme_tcp_mq_ops;
+ static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
  static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
  
  static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
@@@ -1339,7 -1339,8 +1339,7 @@@ static int nvme_tcp_alloc_queue(struct 
  {
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 -      struct linger sol = { .l_onoff = 1, .l_linger = 0 };
 -      int ret, opt, rcv_pdu_size;
 +      int ret, rcv_pdu_size;
  
        queue->ctrl = ctrl;
        INIT_LIST_HEAD(&queue->send_list);
        }
  
        /* Single syn retry */
 -      opt = 1;
 -      ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
 -                      (char *)&opt, sizeof(opt));
 -      if (ret) {
 -              dev_err(nctrl->device,
 -                      "failed to set TCP_SYNCNT sock opt %d\n", ret);
 -              goto err_sock;
 -      }
 +      tcp_sock_set_syncnt(queue->sock->sk, 1);
  
        /* Set TCP no delay */
 -      opt = 1;
 -      ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
 -                      TCP_NODELAY, (char *)&opt, sizeof(opt));
 -      if (ret) {
 -              dev_err(nctrl->device,
 -                      "failed to set TCP_NODELAY sock opt %d\n", ret);
 -              goto err_sock;
 -      }
 +      tcp_sock_set_nodelay(queue->sock->sk);
  
        /*
         * Cleanup whatever is sitting in the TCP transmit queue on socket
         * close. This is done to prevent stale data from being sent should
         * the network connection be restored before TCP times out.
         */
 -      ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
 -                      (char *)&sol, sizeof(sol));
 -      if (ret) {
 -              dev_err(nctrl->device,
 -                      "failed to set SO_LINGER sock opt %d\n", ret);
 -              goto err_sock;
 -      }
 +      sock_no_linger(queue->sock->sk);
  
 -      if (so_priority > 0) {
 -              ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY,
 -                              (char *)&so_priority, sizeof(so_priority));
 -              if (ret) {
 -                      dev_err(ctrl->ctrl.device,
 -                              "failed to set SO_PRIORITY sock opt, ret %d\n",
 -                              ret);
 -                      goto err_sock;
 -              }
 -      }
 +      if (so_priority > 0)
 +              sock_set_priority(queue->sock->sk, so_priority);
  
        /* Set socket type of service */
 -      if (nctrl->opts->tos >= 0) {
 -              opt = nctrl->opts->tos;
 -              ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
 -                              (char *)&opt, sizeof(opt));
 -              if (ret) {
 -                      dev_err(nctrl->device,
 -                              "failed to set IP_TOS sock opt %d\n", ret);
 -                      goto err_sock;
 -              }
 -      }
 +      if (nctrl->opts->tos >= 0)
 +              ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
  
        queue->sock->sk->sk_allocation = GFP_ATOMIC;
        nvme_tcp_set_queue_io_cpu(queue);
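The hunk above drops the open-coded kernel_setsockopt() calls in favour of the type-safe socket option helpers introduced in 5.8 (tcp_sock_set_syncnt, tcp_sock_set_nodelay, sock_no_linger, sock_set_priority, ip_sock_set_tos). Below is a minimal sketch of the same setup on an already-created kernel TCP socket; the wrapper function is hypothetical, only the helper calls mirror the diff:

/* Sketch, not the driver code: tune a kernel TCP socket with the
 * 5.8 sockopt helpers instead of kernel_setsockopt().
 */
#include <linux/net.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/sock.h>

static void example_tune_tcp_socket(struct socket *sock, int prio, int tos)
{
        tcp_sock_set_syncnt(sock->sk, 1);       /* single SYN retry */
        tcp_sock_set_nodelay(sock->sk);         /* disable Nagle */
        sock_no_linger(sock->sk);               /* discard unsent data on close */
        if (prio > 0)
                sock_set_priority(sock->sk, prio);
        if (tos >= 0)
                ip_sock_set_tos(sock->sk, tos); /* IP type of service */
}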
@@@ -2301,7 -2338,7 +2301,7 @@@ static int nvme_tcp_poll(struct blk_mq_
        return queue->nr_cqe;
  }
  
- static struct blk_mq_ops nvme_tcp_mq_ops = {
+ static const struct blk_mq_ops nvme_tcp_mq_ops = {
        .queue_rq       = nvme_tcp_queue_rq,
        .complete       = nvme_complete_rq,
        .init_request   = nvme_tcp_init_request,
        .poll           = nvme_tcp_poll,
  };
  
- static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+ static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
        .queue_rq       = nvme_tcp_queue_rq,
        .complete       = nvme_complete_rq,
        .init_request   = nvme_tcp_init_request,
index 1669177cd26c9f4a5d4e7bacff3951bbf0eb10c0,9e4cb904ab27384c34d7a4c6350be647ec58aa81..de9217cfd22d7f1420176645fdebb8d81d239979
@@@ -153,7 -153,7 +153,7 @@@ static LIST_HEAD(nvmet_tcp_queue_list)
  static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
  
  static struct workqueue_struct *nvmet_tcp_wq;
- static struct nvmet_fabrics_ops nvmet_tcp_ops;
+ static const struct nvmet_fabrics_ops nvmet_tcp_ops;
  static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
  static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
  
@@@ -1436,6 -1436,7 +1436,6 @@@ static int nvmet_tcp_set_queue_sock(str
  {
        struct socket *sock = queue->sock;
        struct inet_sock *inet = inet_sk(sock->sk);
 -      struct linger sol = { .l_onoff = 1, .l_linger = 0 };
        int ret;
  
        ret = kernel_getsockname(sock,
         * close. This is done to prevent stale data from being sent should
         * the network connection be restored before TCP times out.
         */
 -      ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
 -                      (char *)&sol, sizeof(sol));
 -      if (ret)
 -              return ret;
 +      sock_no_linger(sock->sk);
  
 -      if (so_priority > 0) {
 -              ret = kernel_setsockopt(sock, SOL_SOCKET, SO_PRIORITY,
 -                              (char *)&so_priority, sizeof(so_priority));
 -              if (ret)
 -                      return ret;
 -      }
 +      if (so_priority > 0)
 +              sock_set_priority(sock->sk, so_priority);
  
        /* Set socket type of service */
 -      if (inet->rcv_tos > 0) {
 -              int tos = inet->rcv_tos;
 -
 -              ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
 -                              (char *)&tos, sizeof(tos));
 -              if (ret)
 -                      return ret;
 -      }
 +      if (inet->rcv_tos > 0)
 +              ip_sock_set_tos(sock->sk, inet->rcv_tos);
  
        write_lock_bh(&sock->sk->sk_callback_lock);
        sock->sk->sk_user_data = queue;
@@@ -1581,7 -1595,7 +1581,7 @@@ static int nvmet_tcp_add_port(struct nv
  {
        struct nvmet_tcp_port *port;
        __kernel_sa_family_t af;
 -      int opt, ret;
 +      int ret;
  
        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
        port->sock->sk->sk_user_data = port;
        port->data_ready = port->sock->sk->sk_data_ready;
        port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
 -
 -      opt = 1;
 -      ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
 -                      TCP_NODELAY, (char *)&opt, sizeof(opt));
 -      if (ret) {
 -              pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
 -              goto err_sock;
 -      }
 -
 -      ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
 -                      (char *)&opt, sizeof(opt));
 -      if (ret) {
 -              pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
 -              goto err_sock;
 -      }
 -
 -      if (so_priority > 0) {
 -              ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_PRIORITY,
 -                              (char *)&so_priority, sizeof(so_priority));
 -              if (ret) {
 -                      pr_err("failed to set SO_PRIORITY sock opt %d\n", ret);
 -                      goto err_sock;
 -              }
 -      }
 +      sock_set_reuseaddr(port->sock->sk);
 +      tcp_sock_set_nodelay(port->sock->sk);
 +      if (so_priority > 0)
 +              sock_set_priority(port->sock->sk, so_priority);
  
        ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
                        sizeof(port->addr));
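The same conversion applies to the nvmet listening socket above: SO_REUSEADDR, TCP_NODELAY and SO_PRIORITY are now set through helpers before the bind. A hedged sketch of such a listener setup follows; the function, parameters and backlog value are illustrative, not the nvmet code:

/* Sketch only: bring up a listening kernel TCP socket using the 5.8
 * helpers plus kernel_bind()/kernel_listen().
 */
#include <linux/net.h>
#include <linux/tcp.h>
#include <net/sock.h>

static int example_setup_listener(struct socket *sock, struct sockaddr *addr,
                                  int addrlen, int prio)
{
        int ret;

        sock_set_reuseaddr(sock->sk);
        tcp_sock_set_nodelay(sock->sk);
        if (prio > 0)
                sock_set_priority(sock->sk, prio);

        ret = kernel_bind(sock, addr, addrlen);
        if (ret)
                return ret;

        return kernel_listen(sock, 128);        /* illustrative backlog */
}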
@@@ -1713,7 -1747,7 +1713,7 @@@ static void nvmet_tcp_disc_port_addr(st
        }
  }
  
- static struct nvmet_fabrics_ops nvmet_tcp_ops = {
+ static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
        .owner                  = THIS_MODULE,
        .type                   = NVMF_TRTYPE_TCP,
        .msdbd                  = 1,