// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include "nvmet.h"
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
	/* Logical blocks per physical block, 0's based. */
	const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
				      bdev_logical_block_size(bdev));

	/*
	 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
	 * NAWUPF, and NACWU are defined for this namespace and should be
	 * used by the host for this namespace instead of the AWUN, AWUPF,
	 * and ACWU fields in the Identify Controller data structure. If
	 * any of these fields are zero that means that the corresponding
	 * field from the identify controller data structure should be used.
	 */
	id->nsfeat |= 1 << 1;
	id->nawun = lpp0b;
	id->nawupf = lpp0b;
	id->nacwu = lpp0b;

	/*
	 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
	 * NOWS are defined for this namespace and should be used by
	 * the host for I/O optimization.
	 */
	id->nsfeat |= 1 << 4;
	/* NPWG = Namespace Preferred Write Granularity. 0's based */
	id->npwg = lpp0b;
	/* NPWA = Namespace Preferred Write Alignment. 0's based */
	id->npwa = id->npwg;
	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
	id->npdg = to0based(bdev_discard_granularity(bdev) /
			    bdev_logical_block_size(bdev));
	/* NPDA = Namespace Preferred Deallocate Alignment */
	id->npda = id->npdg;
	/* NOWS = Namespace Optimal Write Size */
	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
}
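/*
 * Illustrative sketch of the 0's based conversion above (not taken from any
 * particular device): a bdev with 4096 B physical and 512 B logical blocks
 * gives lpp0b = to0based(8) = 7, so the namespace advertises 8-LBA atomic
 * write and preferred write granularity values.
 */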
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev) {
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
		ns->bdev = NULL;
	}
}
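/*
 * Map the block device's integrity profile to the equivalent NVMe protection
 * information type, if the metadata format is one we can pass through
 * (Type 1 or Type 3 CRC).
 */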
static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
	struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

	if (bi) {
		ns->metadata_size = bi->tuple_size;
		if (bi->profile == &t10_pi_type1_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
		else if (bi->profile == &t10_pi_type3_crc)
			ns->pi_type = NVME_NS_DPS_PI_TYPE3;
		else
			/* Unsupported metadata type */
			ns->metadata_size = 0;
	}
}
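/*
 * Open the backing block device for a namespace. Note the -ENOTBLK
 * convention: it tells the caller that this namespace should be handled by
 * the file-backed I/O path instead, which is presumably why it is the one
 * open error that is not logged here.
 */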
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	/*
	 * When the buffered_io namespace attribute is enabled, the user wants
	 * this block device to be used as a file, so the block device can
	 * take advantage of the page cache.
	 */
	if (ns->buffered_io)
		return -ENOTBLK;

	ns->bdev = blkdev_get_by_path(ns->device_path,
			FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(ns->bdev)) {
		ret = PTR_ERR(ns->bdev);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%ld)\n",
					ns->device_path, PTR_ERR(ns->bdev));
		}
		ns->bdev = NULL;
		return ret;
	}
	ns->size = bdev_nr_bytes(ns->bdev);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ns->pi_type = 0;
	ns->metadata_size = 0;
	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
		nvmet_bdev_ns_enable_integrity(ns);

	if (bdev_is_zoned(ns->bdev)) {
		if (!nvmet_bdev_zns_enable(ns)) {
			nvmet_bdev_ns_disable(ns);
			return -EINVAL;
		}
		ns->csi = NVME_CSI_ZNS;
	}

	return 0;
}
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
	ns->size = bdev_nr_bytes(ns->bdev);
}
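/*
 * Translate a block layer completion status into an NVMe status code and
 * record the command-specific error location and LBA used for error
 * reporting.
 */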
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M : 1 mapping between block layer error
	 * codes and NVMe status codes (see nvme_error_status()). For
	 * consistency, when we reverse map we use the most appropriate NVMe
	 * status code from the group of NVMe status codes used in
	 * nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
			break;
		default:
			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
	default:
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}
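/*
 * Shared bio completion handler: complete the request with the translated
 * NVMe status and release the bio if it was allocated separately from the
 * request.
 */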
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(req->ns->bdev);
	if (unlikely(!bi)) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO,
					bio_max_segs(req->metadata_sg_cnt));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
		     (bi->interval_exp - SECTOR_SHIFT));

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {
		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (unlikely(rc != len)) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}
#else
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	return -EINVAL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
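/*
 * Build and submit one or more bios for an NVMe read or write command. When
 * a bio fills up before the scatterlist is exhausted, a new bio is chained
 * to the previous one so the whole transfer completes as a single unit; when
 * metadata is present, the protection information is attached to each bio
 * before it is submitted.
 */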
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	unsigned int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	struct blk_plug plug;
	sector_t sector;
	blk_opf_t opf;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int iter_flags;
	unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;

	if (!nvmet_check_transfer_len(req, total_len))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			opf |= REQ_FUA;
		iter_flags = SG_MITER_TO_SG;
	} else {
		opf = REQ_OP_READ;
		iter_flags = SG_MITER_FROM_SG;
	}

	if (is_pci_p2pdma_page(sg_page(req->sg)))
		opf |= REQ_NOMERGE;

	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), opf);
	} else {
		bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
				GFP_KERNEL);
	}
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;

	blk_start_plug(&plug);
	if (req->metadata_len)
		sg_miter_start(&prot_miter, req->metadata_sg,
			       req->metadata_sg_cnt, iter_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			if (req->metadata_len) {
				rc = nvmet_bdev_alloc_bip(req, bio,
							  &prot_miter);
				if (unlikely(rc)) {
					bio_io_error(bio);
					return;
				}
			}

			bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
					opf, GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	if (req->metadata_len) {
		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
		if (unlikely(rc)) {
			bio_io_error(bio);
			return;
		}
	}

	submit_bio(bio);
	blk_finish_plug(&plug);
}
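/*
 * Flush as an I/O command: an empty preflush bio, skipped entirely when the
 * device reports no volatile write cache.
 */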
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

	if (!bdev_write_cache(req->ns->bdev)) {
		nvmet_req_complete(req, NVME_SC_SUCCESS);
		return;
	}

	if (!nvmet_check_transfer_len(req, 0))
		return;

	bio_init(bio, req->ns->bdev, req->inline_bvec,
		 ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;

	submit_bio(bio);
}
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (!bdev_write_cache(req->ns->bdev))
		return 0;

	if (blkdev_issue_flush(req->ns->bdev))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}
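/*
 * Discard a single DSM range. -EOPNOTSUPP is treated as success so that
 * ranges the device cannot discard do not fail the whole command.
 */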
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			nvmet_lba_to_sect(ns, range->slba),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, bio);
	if (ret && ret != -EOPNOTSUPP) {
		req->error_slba = le64_to_cpu(range->slba);
		return errno_to_nvme_status(req, ret);
	}
	return NVME_SC_SUCCESS;
}
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status)
			bio_io_error(bio);
		else
			submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}
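/*
 * Dataset Management: only the Deallocate attribute is implemented; the
 * integral read/write hints complete successfully without doing anything.
 */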
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}
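/*
 * Write Zeroes: the length field is 0's based, hence the "+ 1" below. As an
 * illustrative sketch (not taken from a trace), length = 7 with a 4 KiB
 * block size zeroes 8 LBAs, i.e. (7 + 1) << (12 - 9) = 64 512-byte sectors.
 */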
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	sector_t nr_sector;
	sector_t sector;
	int ret;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
	}
}
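/*
 * Dispatch table for bdev-backed namespaces; metadata_len is only set when
 * both the controller and the namespace support protection information.
 */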
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
			req->metadata_len = nvmet_rw_metadata_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}