/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"
#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128
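
/* Recover the iblock-private device structure that embeds the generic se_device. */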
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

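/*
 * The iblock backend keeps no real HBA state; attach/detach only log the
 * virtual HBA coming and going.
 */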
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

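/*
 * Claim the backing struct block_device by udev path and propagate its
 * request_queue limits (block size, max sectors, discard, integrity) into
 * the se_device attributes.
 */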
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI and TRIM
	 * in ATA, in which case we need to report TPE=1.
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

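/*
 * RCU callback that frees the iblock_dev once any concurrent readers of the
 * se_device have drained.
 */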
static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);

	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

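/*
 * Report the device size in logical blocks, rescaling when the block size
 * exported to the initiator differs from the backing device's logical
 * block size.
 */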
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	/*
	 * Both sizes are powers of two (512, 1024, 2048 or 4096), so the
	 * original nested switch statements over every size combination
	 * collapse into a single shift by the log2 of their ratio.
	 */
	if (block_size > dev->dev_attrib.block_size)
		blocks_long <<= ilog2(block_size / dev->dev_attrib.block_size);
	else
		blocks_long >>= ilog2(dev->dev_attrib.block_size / block_size);

	return blocks_long;
}

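/*
 * Called once per completed bio; the final completion (ibr->pending hitting
 * zero) reports the overall status back to the target core.
 */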
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

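/* bi_end_io handler for normal READ/WRITE bios. */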
static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_error) {
		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_error);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

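/* Allocate a bio from the per-device bioset and point it at the backing bdev. */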
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

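/* Submit all queued bios under a single blk plug so they can be merged. */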
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

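/*
 * Completion handler for the flush bio; only signals the initiator when the
 * SYNCHRONIZE CACHE command was issued without the Immediate bit set.
 */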
static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_error)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);

	if (cmd) {
		if (bio->bi_error)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

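/* Translate a SCSI UNMAP into a block-layer discard on the backing device. */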
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	int ret;

	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	return 0;
}

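/*
 * Emulate WRITE SAME by adding the single-block payload to a chain of bios,
 * one entry per LBA in the range.
 */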
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

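/* configfs device parameters: udev_path=, readonly= and the (ignored) force=. */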
enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

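/*
 * Attach a T10-PI integrity payload to the bio and map the command's
 * protection scatterlist into it.
 */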
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
			 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset: %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}

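/*
 * Main READ/WRITE datapath: map the command's data scatterlist onto one or
 * more bios, chain them on a bio_list, and submit in batches of up to
 * IBLOCK_MAX_BIO_PER_TASK.
 */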
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_start = bio;
	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		int rc = iblock_alloc_bip(cmd, bio_start);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}

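/* Backend ops registered with the target core for the "iblock" backend. */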
static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);