 * Network block device - make block devices work over TCP
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 * This file is released under GPLv2 or later.
 * (part of code stolen from loop.c)
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

	struct request *pending;

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;

struct link_dead_args {
	struct work_struct work;

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_DESTROY_ON_DISCONNECT	6

	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;

	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;

	struct blk_mq_tag_set tag_set;

	refcount_t config_refs;

	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;

	struct nbd_device *nbd;

	struct completion send_complete;

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
	return disk_to_dev(nbd->disk);

static const char *nbdcmd_to_ascii(int cmd)
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
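/*
 * While a device is active, the pid of the receiving task is exported
 * through sysfs. For illustration (not part of the driver), userspace
 * can check whether /dev/nbd0 is connected with:
 *
 *	$ cat /sys/block/nbd0/pid
 *
 * The attribute is created in nbd_start_device() and removed again from
 * nbd_config_put() via the NBD_HAS_PID_FILE flag.
 */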
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},

static void nbd_dev_remove(struct nbd_device *nbd)
	struct gendisk *disk = nbd->disk;

	blk_cleanup_queue(disk->queue);
	blk_mq_free_tag_set(&nbd->tag_set);
	disk->private_data = NULL;

static void nbd_put(struct nbd_device *nbd)
	if (refcount_dec_and_mutex_lock(&nbd->refs,
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);

static int nbd_disconnected(struct nbd_config *config)
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;

		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
	kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
	atomic_dec(&nbd->config->live_connections);

	nsock->pending = NULL;
static void nbd_size_clear(struct nbd_device *nbd)
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

static void nbd_size_update(struct nbd_device *nbd)
	struct nbd_config *config = nbd->config;

	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
	struct nbd_config *config = nbd->config;

	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
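	/*
	 * Worked example (illustrative only): with the default 1024-byte
	 * blksize from nbd_alloc_config() and nr_blocks == 2097152, bytesize
	 * becomes 1024 * 2097152 = 2 GiB, and nbd_size_update() then
	 * advertises bytesize >> 9 == 4194304 sectors of 512 bytes to the
	 * block layer.
	 */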
static void nbd_complete_rq(struct request *req)
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);

/*
 * Forcibly shut down the socket, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
	struct nbd_config *config = nbd->config;

	if (config->num_connections == 0)
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		return BLK_EH_HANDLED;
	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * Hooray, we have more connections: requeue this IO; the
		 * submit path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];

				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were using at the time.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			blk_mq_requeue_request(req, true);
		return BLK_EH_NOT_HANDLED;

	dev_err_ratelimited(nbd_to_dev(nbd),
			    "Connection timed out\n");

	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;

	return BLK_EH_HANDLED;
/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
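	/*
	 * I/O to this device may be required to make progress during memory
	 * reclaim (think of a dirty page backed by nbd being written out),
	 * so keep this context from recursing into reclaim and let the
	 * socket dip into the memalloc reserves. This pairs with the
	 * sk_set_memalloc() calls made when sockets are attached.
	 */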
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = msg_flags | MSG_NOSIGNAL;

	if (send)
		result = sock_sendmsg(sock, &msg);
	else
		result = sock_recvmsg(sock, &msg, msg.msg_flags);

			result = -EPIPE; /* short read */
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

/*
 * Different settings for sk->sk_sndtimeo can result in different return
 * values if there is a signal pending when we enter sendmsg, so treat
 * both flavors of interruption the same.
 */
static inline int was_interrupted(int result)
	return result == -ERESTARTSYS || result == -EINTR;

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	u32 nbd_cmd_flags = 0;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
		type = NBD_CMD_FLUSH;
		type = NBD_CMD_WRITE;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent >= sizeof(request)) {
		skip = sent - sizeof(request);
	iov_iter_advance(&from, sent);

	cmd->cookie = nsock->cookie;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	memcpy(request.handle, &tag, sizeof(tag));
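	/*
	 * The 8-byte handle echoed back by the server carries the 32-bit
	 * blk-mq unique tag (hardware queue number in the upper bits,
	 * per-queue tag in the lower bits); nbd_read_stat() uses it to map
	 * a reply back to its request via blk_mq_unique_tag_to_hwq() and
	 * blk_mq_unique_tag_to_tag(). The remaining four handle bytes are
	 * unused.
	 */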
	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			nsock->pending = req;
			return BLK_STS_RESOURCE;
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);

	if (type != NBD_CMD_WRITE)

		struct bio *next = bio->bi_next;
		struct bvec_iter iter;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);

			if (skip >= iov_iter_count(&from)) {
				skip -= iov_iter_count(&from);
			iov_iter_advance(&from, skip);

			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					return BLK_STS_RESOURCE;
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
	nsock->pending = NULL;
/* A NULL return means something went wrong, so inform userspace. */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
	struct nbd_config *config = nbd->config;
	struct nbd_reply reply;
	struct request *req = NULL;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};

	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
		return ERR_PTR(-ENOENT);
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
		cmd->status = BLK_STS_IOERR;

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout handling resubmit this
				 * request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
				return ERR_PTR(-EIO);
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",

		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
static void recv_work(struct work_struct *work)
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;

		cmd = nbd_read_stat(nbd, args->index);
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);

static void nbd_clear_req(struct request *req, void *data, bool reserved)
	if (!blk_mq_request_started(req))
	cmd = blk_mq_rq_to_pdu(req);
	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);

static void nbd_clear_que(struct nbd_device *nbd)
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");

static int find_fallback(struct nbd_device *nbd, int index)
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		for (i = 0; i < config->num_connections; i++) {
			if (!config->socks[i]->dead) {
		nsock->fallback_index = new_index;
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
	new_index = nsock->fallback_index;
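/*
 * Sleep until either a live connection reappears or dead_conn_timeout
 * expires, then report whether any connection is live so the caller can
 * decide between retrying the I/O and failing it.
 */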
static int wait_for_reconnect(struct nbd_device *nbd)
	struct nbd_config *config = nbd->config;

	if (!config->dead_conn_timeout)
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
	wait_event_timeout(config->conn_wait,
			   atomic_read(&config->live_connections),
			   config->dead_conn_timeout);
	return atomic_read(&config->live_connections);

static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		blk_mq_start_request(req);
	cmd->status = BLK_STS_OK;

	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
		int old_index = index;

		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
			if (wait_for_reconnect(nbd)) {
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			blk_mq_start_request(req);

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);

	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		blk_mq_requeue_request(req, true);
	mutex_unlock(&nsock->tx_lock);
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
	struct nbd_config *config = nbd->config;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;

	sock = sockfd_lookup(arg, &err);

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);

	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);

	config->socks = socks;

	nsock->fallback_index = -1;
	mutex_init(&nsock->tx_lock);
	nsock->pending = NULL;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;

	sock = sockfd_lookup(arg, &err);

	args = kzalloc(sizeof(*args), GFP_KERNEL);

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		mutex_lock(&nsock->tx_lock);
			mutex_unlock(&nsock->tx_lock);

		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);

		nsock->fallback_index = -1;

		INIT_WORK(&args->work, recv_work);

		mutex_unlock(&nsock->tx_lock);

		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_lock in an error path in recv_work, so we
		 * need to queue_work outside of the tx_lock.
		 */
		queue_work(recv_workqueue, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
static void nbd_bdev_reset(struct block_device *bdev)
	if (bdev->bd_openers > 1)
	bd_set_size(bdev, 0);
	blkdev_reread_part(bdev);
	bdev->bd_invalidated = 1;

static void nbd_parse_flags(struct nbd_device *nbd)
	struct nbd_config *config = nbd->config;

	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else
		blk_queue_write_cache(nbd->disk->queue, false, false);

static void send_disconnects(struct nbd_device *nbd)
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);

static int nbd_disconnect(struct nbd_device *nbd)
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
	send_disconnects(nbd);

static void nbd_clear_sock(struct nbd_device *nbd)
	nbd->task_setup = NULL;

static void nbd_config_put(struct nbd_device *nbd)
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;

		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			kfree(config->socks);

		nbd->tag_set.timeout = 0;
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);

		module_put(THIS_MODULE);
static int nbd_start_device(struct nbd_device *nbd)
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;

	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
	set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);

		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		queue_work(recv_workqueue, &args->work);
	nbd_size_update(nbd);

static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
	struct nbd_config *config = nbd->config;

	ret = nbd_start_device(nbd);

	bd_set_size(bdev, config->bytesize);
	bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	mutex_lock(&nbd->config_lock);
	bd_set_size(bdev, 0);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
	if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
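/*
 * For illustration (not part of the driver): a minimal userspace setup
 * sequence over the legacy ioctl interface, assuming sock is a file
 * descriptor already connected to an nbd server:
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_SET_SOCK, sock);
 *	ioctl(dev, NBD_DO_IT);		(blocks until disconnect)
 *	ioctl(dev, NBD_CLEAR_SOCK);
 *
 * NBD_DO_IT maps to nbd_start_device_ioctl() below and does not return
 * until the receive threads have exited.
 */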
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
	struct nbd_config *config = nbd->config;

	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
	case NBD_SET_TIMEOUT:
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);

		config->flags = arg;
		return nbd_start_device_ioctl(nbd, bdev);
	/*
	 * This is for compatibility only. The queue is always cleared
	 * by NBD_DO_IT or NBD_CLEAR_SOCK.
	 */
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't, so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
static struct nbd_config *nbd_alloc_config(void)
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = 1024;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);

static int nbd_open(struct block_device *bdev, fmode_t mode)
	struct nbd_device *nbd;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!refcount_inc_not_zero(&nbd->refs)) {
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
		config = nbd->config = nbd_alloc_config();
			mutex_unlock(&nbd->config_lock);
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
	mutex_unlock(&nbd_index_mutex);

static void nbd_release(struct gendisk *disk, fmode_t mode)
	struct nbd_device *nbd = disk->private_data;

	nbd_config_put(nbd);

static const struct block_device_operations nbd_fops =
	.owner =	THIS_MODULE,
	.release =	nbd_release,
	.compat_ioctl =	nbd_ioctl,

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
	struct nbd_device *nbd = s->private;

	seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.llseek = seq_lseek,
	.release = single_release,

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
	return single_open(file, nbd_dbg_flags_show, inode->i_private);

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.llseek = seq_lseek,
	.release = single_release,

static int nbd_dev_dbg_init(struct nbd_device *nbd)
	struct nbd_config *config = nbd->config;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

static void nbd_dev_dbg_close(struct nbd_device *nbd)
	debugfs_remove_recursive(nbd->config->dbg_dir);

static int nbd_dbg_init(void)
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	nbd_dbg_dir = dbg_dir;

static void nbd_dbg_close(void)
	debugfs_remove_recursive(nbd_dbg_dir);

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)

static void nbd_dev_dbg_close(struct nbd_device *nbd)

static int nbd_dbg_init(void)

static void nbd_dbg_close(void)

static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = set->driver_data;

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,

static int nbd_dev_add(int index)
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);

	disk = alloc_disk(1 << part_shift);

		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);

	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);

	q = blk_mq_init_queue(&nbd->tag_set);

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	nbd_total_devices++;

	blk_mq_free_tag_set(&nbd->tag_set);
	idr_remove(&nbd_index_idr, index);

static int find_free_cb(int id, void *ptr, void *data)
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {

/* Netlink interface. */
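/*
 * An illustrative sketch (documentation, not driver code) of the
 * NBD_CMD_CONNECT message layout that nbd_genl_connect() below expects:
 *
 *	NBD_CMD_CONNECT
 *	  NBD_ATTR_INDEX	(u32, optional; omitted means pick a free device)
 *	  NBD_ATTR_SIZE_BYTES	(u64, required)
 *	  NBD_ATTR_SOCKETS	(nested, required)
 *	    NBD_SOCK_ITEM
 *	      NBD_SOCK_FD	(u32, fd of a connected socket)
 *
 * The reply carries NBD_ATTR_INDEX with the device index actually used;
 * see nbd_connect_reply().
 */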
static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		= { .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		= { .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	= { .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		= { .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		= { .type = NLA_NESTED },
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	= { .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		= { .type = NLA_NESTED },

static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			= { .type = NLA_U32 },

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		= { .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		= { .type = NLA_U8 },

static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");

	mutex_lock(&nbd_index_mutex);
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
			nbd = idr_find(&nbd_index_idr, new_index);
		nbd = idr_find(&nbd_index_idr, index);
			ret = nbd_dev_add(index);
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
			nbd = idr_find(&nbd_index_idr, index);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
	config = nbd->config = nbd_alloc_config();
		mutex_unlock(&nbd->config_lock);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_BOUND, &config->runtime_flags);

	if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
		u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

		nbd_size_set(nbd, config->blksize,
			     div64_u64(bytes, config->blksize));
	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);

		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
				printk(KERN_ERR "nbd: error processing sock list\n");
			if (!socks[NBD_SOCK_FD])
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
	ret = nbd_start_device(nbd);
	mutex_unlock(&nbd->config_lock);
		set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	nbd_config_put(nbd);

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
	struct nbd_device *nbd;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	mutex_unlock(&nbd->config_lock);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
	nbd_config_put(nbd);

static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");

	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);

		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
				printk(KERN_ERR "nbd: error processing sock list\n");
			if (!socks[NBD_SOCK_FD])
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
				dev_info(nbd_to_dev(nbd), "reconnected socket\n");

	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);

static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_status,
	},

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },

static struct genl_family nbd_genl_family __ro_after_init = {
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= nbd_connect_genl_ops,
	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
	struct nlattr *dev_opt;

	/* This is a little racy, but for status it's ok. The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
	dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
	nla_nest_end(reply, dev_opt);

static int status_cb(int id, void *ptr, void *data)
	struct nbd_device *nbd = ptr;

	return populate_nbd_status(nbd, (struct sk_buff *)data);

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
	struct nlattr *dev_list;
	struct sk_buff *reply;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,

	dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		struct nbd_device *nbd;

		nbd = idr_find(&nbd_index_idr, index);
			ret = populate_nbd_status(nbd, reply);
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	genlmsg_reply(reply, info);
	mutex_unlock(&nbd_index_mutex);

static void nbd_connect_reply(struct genl_info *info, int index)
	struct sk_buff *skb;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);

static void nbd_mcast_index(int index)
	struct sk_buff *skb;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);

static void nbd_dead_link_work(struct work_struct *work)
	struct link_dead_args *args = container_of(work, struct link_dead_args,
	nbd_mcast_index(args->index);

static int __init nbd_init(void)
	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
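	/*
	 * The on-wire request header must remain exactly 28 bytes: 4-byte
	 * magic, 4-byte type, 8-byte handle, 8-byte offset, 4-byte length,
	 * all in network byte order; see nbd_send_cmd() for how the fields
	 * are filled in.
	 */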
	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");

	part_shift = fls(max_part);

	/*
	 * Adjust max_part according to part_shift as it is exported
	 * to user space so that users can know the maximum number of
	 * partitions the kernel is able to manage.
	 *
	 * Note that -1 is required because partition 0 is reserved
	 * for the whole disk.
	 */
	max_part = (1UL << part_shift) - 1;
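	/*
	 * Worked example with the defaults: max_part = 16 gives
	 * part_shift = fls(16) = 5, so max_part becomes (1 << 5) - 1 = 31
	 * and each device spans 32 minors (whole disk plus 31 partitions).
	 */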
	if ((1UL << part_shift) > DISK_MAX_PARTS)
	if (nbds_max > 1UL << (MINORBITS - part_shift))

	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		destroy_workqueue(recv_workqueue);

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
	mutex_unlock(&nbd_index_mutex);

static int nbd_exit_cb(int id, void *ptr, void *data)
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);

static void __exit nbd_cleanup(void)
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
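/*
 * For illustration (not part of the driver): loading the module with
 * custom parameters, e.g.
 *
 *	# modprobe nbd nbds_max=4 max_part=8
 *
 * creates /dev/nbd0 through /dev/nbd3 at init time, each with partition
 * support.
 */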