#define PART_BITS 4
#define VQ_NAME_LEN 16
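+ /* Upper bound on the discard segment count advertised to the block layer. */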
+ #define MAX_DISCARD_SEGMENTS 256u
static int major;
static DEFINE_IDA(vd_index_ida);
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
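+ /*
+  * Build the payload for a discard or write-zeroes request: one
+  * virtio_blk_discard_write_zeroes range per bio in the request,
+  * attached as a special payload so blk_rq_map_sg() maps it as the
+  * data buffer sent to the device.
+  */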
+ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
+ {
+ unsigned short segments = blk_rq_nr_discard_segments(req);
+ unsigned short n = 0;
+ struct virtio_blk_discard_write_zeroes *range;
+ struct bio *bio;
+ u32 flags = 0;
+
+ if (unmap)
+ flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;
+
+ range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
+ if (!range)
+ return -ENOMEM;
+
+ __rq_for_each_bio(bio, req) {
+ u64 sector = bio->bi_iter.bi_sector;
+ u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+
+ range[n].flags = cpu_to_le32(flags);
+ range[n].num_sectors = cpu_to_le32(num_sectors);
+ range[n].sector = cpu_to_le64(sector);
+ n++;
+ }
+
+ req->special_vec.bv_page = virt_to_page(range);
+ req->special_vec.bv_offset = offset_in_page(range);
+ req->special_vec.bv_len = sizeof(*range) * segments;
+ req->rq_flags |= RQF_SPECIAL_PAYLOAD;
+
+ return 0;
+ }
+
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
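+
+ /*
+  * Discard/write-zeroes requests carry the range array allocated in
+  * virtblk_setup_discard_write_zeroes() as their special payload;
+  * free it now that the request has completed.
+  */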
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ kfree(page_address(req->special_vec.bv_page) +
+ req->special_vec.bv_offset);
+ }
+
switch (req_op(req)) {
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
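+ /*
+  * Called by blk-mq when requests queued with bd->last unset will not be
+  * followed by further submissions; flush any doorbell notification that
+  * virtio_queue_rq() deferred so the host still sees the pending requests.
+  */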
+static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+ bool kick;
+
+ spin_lock_irq(&vq->lock);
+ kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irq(&vq->lock);
+
+ if (kick)
+ virtqueue_notify(vq->vq);
+}
+
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
int qid = hctx->queue_num;
int err;
bool notify = false;
+ bool unmap = false;
u32 type;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
case REQ_OP_FLUSH:
type = VIRTIO_BLK_T_FLUSH;
break;
+ case REQ_OP_DISCARD:
+ type = VIRTIO_BLK_T_DISCARD;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ type = VIRTIO_BLK_T_WRITE_ZEROES;
+ unmap = !(req->cmd_flags & REQ_NOUNMAP);
+ break;
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
type = VIRTIO_BLK_T_SCSI_CMD;
blk_mq_start_request(req);
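+ /*
+  * Discard and write-zeroes requests carry no bio data; build the range
+  * payload that blk_rq_map_sg() will pick up below.
+  */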
+ if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ err = virtblk_setup_discard_write_zeroes(req, unmap);
+ if (err)
+ return BLK_STS_RESOURCE;
+ }
+
num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
if (num) {
if (rq_data_dir(req) == WRITE)
{
struct virtio_blk *vblk = set->driver_data;
- return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+ return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
+ .commit_rqs = virtio_commit_rqs,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
if (!err && opt_io_size)
blk_queue_io_opt(q, blk_size * opt_io_size);
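+ /* Pick up the discard limits the device advertises in its config space. */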
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
+ q->limits.discard_granularity = blk_size;
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ discard_sector_alignment, &v);
+ q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ max_discard_sectors, &v);
+ blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
+
+ virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
+ &v);
+ blk_queue_max_discard_segments(q,
+ min_not_zero(v,
+ MAX_DISCARD_SEGMENTS));
+
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
+ virtio_cread(vdev, struct virtio_blk_config,
+ max_write_zeroes_sectors, &v);
+ blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
+ }
+
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
VIRTIO_BLK_F_SCSI,
#endif
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
- VIRTIO_BLK_F_MQ,
+ VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
}
;
static unsigned int features[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
- VIRTIO_BLK_F_MQ,
+ VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
static struct virtio_driver virtio_blk = {
return 0;
}
-static char *vhost_scsi_get_fabric_name(void)
-{
- return "vhost";
-}
-
static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
&vc->out_iter))) {
- vq_err(vq, "Faulted on copy_from_iter\n");
+ vq_err(vq, "Faulted on copy_from_iter_full\n");
} else if (unlikely(*vc->lunp != 1)) {
/* virtio-scsi spec requires byte 0 of the lun to be 1 */
vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
se_tpg = &tpg->se_tpg;
ret = target_depend_item(&se_tpg->tpg_group.cg_item);
if (ret) {
- pr_warn("configfs_depend_item() failed: %d\n", ret);
+ pr_warn("target_depend_item() failed: %d\n", ret);
kfree(vs_tpg);
mutex_unlock(&tpg->tv_tpg_mutex);
goto out;
static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
- .name = "vhost",
- .get_fabric_name = vhost_scsi_get_fabric_name,
+ .fabric_name = "vhost",
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
.tpg_get_tag = vhost_scsi_get_tpgt,
.tpg_check_demo_mode = vhost_scsi_check_true,