/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000 Pavel Machek <[email protected]>
 * Parts copyright 2001 Steven Whitehouse <[email protected]>
 *
 * (part of code stolen from loop.c)
 *
 * 97-3-25 compiled 0-th version, not yet tested it
 *   (it did not work, BTW) (later that day) HEY! it works!
 *   (bit later) hmm, not that much... 2:00am next day:
 *   yes, it works, but it gives something like 50kB/sec
 * 97-4-01 complete rewrite to make it possible for many requests at
 *   once to be processed
 * 97-4-11 Making protocol independent of endianity etc.
 * 97-9-13 Cosmetic changes
 * 98-5-13 Attempt to make 64-bit-clean on 64-bit machines
 * 99-1-11 Attempt to make 64-bit-clean on 32-bit machines <[email protected]>
 * 01-2-27 Fix to store proper blockcount for kernel (calculated using
 *   BLOCK_SIZE_BITS, not device blocksize) <[email protected]>
 * 01-3-11 Make nbd work with new Linux block layer code. It now supports
 *   plugging like all the other block devices. Also added in MSG_MORE to
 *   reduce number of partial TCP segments sent. <[email protected]>
 * 01-12-6 Fix deadlock condition by making queue locks independent of
 *   the transmit lock. <[email protected]>
 * 02-10-11 Allow hung xmit to be aborted via SIGKILL & various fixes.
 *   <[email protected]> <[email protected]>
 * 03-06-22 Make nbd work with new linux 2.5 block layer design. This fixes
 *   memory corruption from module removal and possible memory corruption
 *   from sending/receiving disk data. <[email protected]>
 * 03-06-23 Cosmetic changes. <[email protected]>
 * 03-06-23 Enhance diagnostics support. <[email protected]>
 * 03-06-24 Remove unneeded blksize_bits field from nbd_device struct.
 *   <[email protected]>
 * 03-06-24 Cleanup PARANOIA usage & code. <[email protected]>
 * 04-02-19 Remove PARANOIA, plus various cleanups (Paul Clements)
 *
 * possible FIXME: make set_sock / set_blksize / set_size / do_it one syscall
 * why not: would need access_ok and friends, would share yet another
 *          structure with userland
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <net/sock.h>

#include <linux/devfs_fs_kernel.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/types.h>

#include <linux/nbd.h>

#define LO_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL       0x0004
#define DBG_INIT        0x0010
#define DBG_EXIT        0x0020
#define DBG_BLKDEV      0x0100
#define DBG_RX          0x0200
#define DBG_TX          0x0400
static unsigned int debugflags;
#endif /* NDEBUG */

static unsigned int nbds_max = 16;
static struct nbd_device nbd_dev[MAX_NBD];

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

#ifndef NDEBUG
static const char *ioctl_cmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_SET_SOCK: return "set-sock";
	case NBD_SET_BLKSIZE: return "set-blksize";
	case NBD_SET_SIZE: return "set-size";
	case NBD_DO_IT: return "do-it";
	case NBD_CLEAR_SOCK: return "clear-sock";
	case NBD_CLEAR_QUE: return "clear-que";
	case NBD_PRINT_DEBUG: return "print-debug";
	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
	case NBD_DISCONNECT: return "disconnect";
	case BLKROSET: return "set-read-only";
	case BLKFLSBUF: return "flush-buffer-cache";
	}
	return "unknown";
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	}
	return "invalid";
}
#endif /* NDEBUG */

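/*
 * Complete a request. This runs outside the request function (from the
 * receiver loop and the various error paths), so it must take the queue
 * lock itself before calling the block layer completion helpers.
 */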
static void nbd_end_request(struct request *req)
{
	int uptodate = (req->errors == 0) ? 1 : 0;
	request_queue_t *q = req->q;
	unsigned long flags;

	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
			req, uptodate ? "done" : "failed");

	spin_lock_irqsave(q->queue_lock, flags);
	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
		end_that_request_last(req, uptodate);
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 *  Send or receive packet.
 */
static int sock_xmit(struct socket *sock, int send, void *buf, int size,
		int msg_flags)
{
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long flags;
	sigset_t oldset;

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;
	sigfillset(&current->blocked);
	sigdelsetmask(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	do {
		sock->sk->sk_allocation = GFP_NOIO;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);

		if (signal_pending(current)) {
			siginfo_t info;
			spin_lock_irqsave(&current->sighand->siglock, flags);
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				current->pid, current->comm,
				dequeue_signal(current, &current->blocked, &info));
			spin_unlock_irqrestore(&current->sighand->siglock, flags);
			result = -EINTR;
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	return result;
}

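/*
 * Send one bio_vec: map its page, transmit the segment in full via
 * sock_xmit(), then unmap.
 */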
static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(sock, 1, kaddr + bvec->bv_offset, bvec->bv_len,
			flags);
	kunmap(bvec->bv_page);
	return result;
}

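/*
 * Transmit one request to the server: the 28-byte nbd_request header
 * first (magic, type, handle, offset, length; see <linux/nbd.h>), then,
 * for writes, every segment of every bio in the request. The request
 * pointer itself is used as the handle, which the server echoes back in
 * its reply so we can match replies to requests.
 */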
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
	int result, i, flags;
	struct nbd_request request;
	unsigned long size = req->nr_sectors << 9;
	struct socket *sock = lo->sock;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(nbd_cmd(req));
	request.from = cpu_to_be64((u64) req->sector << 9);
	request.len = htonl(size);
	memcpy(request.handle, &req, sizeof(req));

	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
			lo->disk->disk_name, req,
			nbdcmd_to_ascii(nbd_cmd(req)),
			(unsigned long long)req->sector << 9,
			req->nr_sectors << 9);
	result = sock_xmit(sock, 1, &request, sizeof(request),
			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		printk(KERN_ERR "%s: Send control failed (result %d)\n",
				lo->disk->disk_name, result);
		goto error_out;
	}

	if (nbd_cmd(req) == NBD_CMD_WRITE) {
		struct bio *bio;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_bio(bio, req) {
			struct bio_vec *bvec;
			bio_for_each_segment(bvec, bio, i) {
				flags = 0;
				if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
					flags = MSG_MORE;
				dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
						lo->disk->disk_name, req,
						bvec->bv_len);
				result = sock_send_bvec(sock, bvec, flags);
				if (result <= 0) {
					printk(KERN_ERR "%s: Send data failed (result %d)\n",
							lo->disk->disk_name,
							result);
					goto error_out;
				}
			}
		}
	}
	return 0;

error_out:
	return 1;
}

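/*
 * Look up the in-flight request matching a reply. The server echoes back
 * the handle we sent, which is the request pointer itself; wait until
 * that request is no longer being transmitted (lo->active_req), then
 * pull it off the device's queue.
 */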
static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
{
	struct request *req;
	struct list_head *tmp;
	struct request *xreq;
	int err;

	memcpy(&xreq, handle, sizeof(xreq));

	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
	if (unlikely(err))
		goto out;

	spin_lock(&lo->queue_lock);
	list_for_each(tmp, &lo->queue_head) {
		req = list_entry(tmp, struct request, queuelist);
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&lo->queue_lock);
		return req;
	}
	spin_unlock(&lo->queue_lock);

	err = -ENOENT;

out:
	return ERR_PTR(err);
}

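/*
 * Receive one bio_vec: map its page and read exactly bv_len bytes into
 * it (MSG_WAITALL), then unmap.
 */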
static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(sock, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *lo)
{
	int result;
	struct nbd_reply reply;
	struct request *req;
	struct socket *sock = lo->sock;

	reply.magic = 0;
	result = sock_xmit(sock, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		printk(KERN_ERR "%s: Receive control failed (result %d)\n",
				lo->disk->disk_name, result);
		goto harderror;
	}
	req = nbd_find_request(lo, reply.handle);
	if (unlikely(IS_ERR(req))) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		printk(KERN_ERR "%s: Unexpected reply (%p)\n",
				lo->disk->disk_name, reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
				lo->disk->disk_name,
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}
	if (ntohl(reply.error)) {
		printk(KERN_ERR "%s: Other side returned error (%d)\n",
				lo->disk->disk_name, ntohl(reply.error));
		req->errors++;
		return req;
	}

	dprintk(DBG_RX, "%s: request %p: got reply\n",
			lo->disk->disk_name, req);
	if (nbd_cmd(req) == NBD_CMD_READ) {
		int i;
		struct bio *bio;
		rq_for_each_bio(bio, req) {
			struct bio_vec *bvec;
			bio_for_each_segment(bvec, bio, i) {
				result = sock_recv_bvec(sock, bvec);
				if (result <= 0) {
					printk(KERN_ERR "%s: Receive data failed (result %d)\n",
							lo->disk->disk_name,
							result);
					goto harderror;
				}
				dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
					lo->disk->disk_name, req, bvec->bv_len);
			}
		}
	}
	return req;
harderror:
	lo->harderror = result;
	return NULL;
}

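/*
 * The receive loop: runs in the context of the NBD_DO_IT ioctl and
 * completes requests as their replies arrive, until nbd_read_stat()
 * fails (socket shut down, protocol error, or fatal signal).
 */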
static void nbd_do_it(struct nbd_device *lo)
{
	struct request *req;

	BUG_ON(lo->magic != LO_MAGIC);

	while ((req = nbd_read_stat(lo)) != NULL)
		nbd_end_request(req);
	return;
}

static void nbd_clear_que(struct nbd_device *lo)
{
	struct request *req;

	BUG_ON(lo->magic != LO_MAGIC);

	/*
	 * Because we have set lo->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(lo->sock);
	BUG_ON(lo->active_req);

	while (!list_empty(&lo->queue_head)) {
		req = list_entry(lo->queue_head.next, struct request,
				queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(req);
	}
}

/*
 * We always wait for the result of a write, for now. It would be nice
 * to make this optional in the future, e.g.:
 * if ((req->cmd == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
 *	{ printk("Warning: Ignoring result!\n"); nbd_end_request(req); }
 */
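/*
 * The request function: called by the block layer with queue_lock held
 * and interrupts disabled. Each request is dequeued and transmitted
 * under tx_lock with the queue lock dropped, then parked on queue_head
 * until its reply comes back through nbd_read_stat().
 */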
static void do_nbd_request(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct nbd_device *lo;

		blkdev_dequeue_request(req);
		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%lx)\n",
				req->rq_disk->disk_name, req, req->flags);

		if (!(req->flags & REQ_CMD))
			goto error_out;

		lo = req->rq_disk->private_data;

		BUG_ON(lo->magic != LO_MAGIC);

		nbd_cmd(req) = NBD_CMD_READ;
		if (rq_data_dir(req) == WRITE) {
			nbd_cmd(req) = NBD_CMD_WRITE;
			if (lo->flags & NBD_READ_ONLY) {
				printk(KERN_ERR "%s: Write on read-only\n",
						lo->disk->disk_name);
				goto error_out;
			}
		}

		req->errors = 0;
		spin_unlock_irq(q->queue_lock);

		mutex_lock(&lo->tx_lock);
		if (unlikely(!lo->sock)) {
			mutex_unlock(&lo->tx_lock);
			printk(KERN_ERR "%s: Attempted send on closed socket\n",
			       lo->disk->disk_name);
			req->errors++;
			nbd_end_request(req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		lo->active_req = req;

		if (nbd_send_req(lo, req) != 0) {
			printk(KERN_ERR "%s: Request send failed\n",
					lo->disk->disk_name);
			req->errors++;
			nbd_end_request(req);
		} else {
			spin_lock(&lo->queue_lock);
			list_add(&req->queuelist, &lo->queue_head);
			spin_unlock(&lo->queue_lock);
		}

		lo->active_req = NULL;
		mutex_unlock(&lo->tx_lock);
		wake_up_all(&lo->active_wq);

		spin_lock_irq(q->queue_lock);
		continue;

error_out:
		req->errors++;
		spin_unlock(q->queue_lock);
		nbd_end_request(req);
		spin_lock(q->queue_lock);
	}
	return;
}

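/*
 * The ioctl interface: this is how userspace (typically nbd-client)
 * hands us the connected socket, sets device and block sizes, and
 * finally parks itself in NBD_DO_IT to run the receive loop.
 */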
static int nbd_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
	int error;
	struct request sreq;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(lo->magic != LO_MAGIC);

	/* Anyone capable of this syscall can do *real bad* things */
	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

	switch (cmd) {
	case NBD_DISCONNECT:
		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
		sreq.flags = REQ_SPECIAL;
		nbd_cmd(&sreq) = NBD_CMD_DISC;
		/*
		 * Set these to sane values in case server implementation
		 * fails to check the request type first and also to keep
		 * debugging output cleaner.
		 */
		sreq.sector = 0;
		sreq.nr_sectors = 0;
		if (!lo->sock)
			return -EINVAL;
		nbd_send_req(lo, &sreq);
		return 0;

	case NBD_CLEAR_SOCK:
		error = 0;
		mutex_lock(&lo->tx_lock);
		lo->sock = NULL;
		mutex_unlock(&lo->tx_lock);
		file = lo->file;
		lo->file = NULL;
		nbd_clear_que(lo);
		BUG_ON(!list_empty(&lo->queue_head));
		if (file)
			fput(file);
		return error;
	case NBD_SET_SOCK:
		if (lo->file)
			return -EBUSY;
		error = -EINVAL;
		file = fget(arg);
		if (file) {
			inode = file->f_dentry->d_inode;
			if (S_ISSOCK(inode->i_mode)) {
				lo->file = file;
				lo->sock = SOCKET_I(inode);
				error = 0;
			} else {
				fput(file);
			}
		}
		return error;
	case NBD_SET_BLKSIZE:
		lo->blksize = arg;
		lo->bytesize &= ~(lo->blksize - 1);
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;
	case NBD_SET_SIZE:
		lo->bytesize = arg & ~(lo->blksize - 1);
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		lo->bytesize = ((u64) arg) * lo->blksize;
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;
	case NBD_DO_IT:
		if (!lo->file)
			return -EINVAL;
		nbd_do_it(lo);
		/* on return tidy up in case we have a signal */
		/* Forcibly shutdown the socket causing all listeners
		 * to error
		 *
		 * FIXME: This code is duplicated from sys_shutdown, but
		 * there should be a more generic interface rather than
		 * calling socket ops directly here */
		mutex_lock(&lo->tx_lock);
		if (lo->sock) {
			printk(KERN_WARNING "%s: shutting down socket\n",
				lo->disk->disk_name);
			lo->sock->ops->shutdown(lo->sock,
				SEND_SHUTDOWN|RCV_SHUTDOWN);
			lo->sock = NULL;
		}
		mutex_unlock(&lo->tx_lock);
		file = lo->file;
		lo->file = NULL;
		nbd_clear_que(lo);
		printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
		if (file)
			fput(file);
		return lo->harderror;
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
		return 0;
	case NBD_PRINT_DEBUG:
		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
			inode->i_bdev->bd_disk->disk_name,
			lo->queue_head.next, lo->queue_head.prev,
			&lo->queue_head);
		return 0;
	}
	return -EINVAL;
}

static struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};

/*
 * And here should be modules and kernel interface
 * (Just smiley confuses emacs :-)
 */

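/*
 * Module init: sanity-check the on-the-wire request size, allocate a
 * gendisk and a request queue per device, register the major, then
 * initialise and add each disk. On failure, tear down whatever was
 * allocated so far.
 */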
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;

	if (sizeof(struct nbd_request) != 28) {
		printk(KERN_CRIT "nbd: sizeof nbd_request needs to be 28 in order to work!\n");
		return -EIO;
	}

	if (nbds_max > MAX_NBD) {
		printk(KERN_CRIT "nbd: cannot allocate more than %u nbds; %u requested.\n",
				MAX_NBD, nbds_max);
		return -EINVAL;
	}

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

	devfs_mk_dir("nbd");
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].file = NULL;
		nbd_dev[i].magic = LO_MAGIC;
		nbd_dev[i].flags = 0;
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
		disk->major = NBD_MAJOR;
		disk->first_minor = i;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		sprintf(disk->disk_name, "nbd%d", i);
		sprintf(disk->devfs_name, "nbd/%d", i);
		set_capacity(disk, 0x7ffffc00ULL << 1); /* 2 TB */
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	return err;
}

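/*
 * Module exit: undo nbd_init() in reverse order.
 */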
static void __exit nbd_cleanup(void)
{
	int i;
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	devfs_remove("nbd");
	unregister_blkdev(NBD_MAJOR, "nbd");
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize.");
#ifndef NDEBUG
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
#endif