linux.git: drivers/nvme/target/admin-cmd.c (Linux 6.14-rc3)

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <linux/unaligned.h>
#include "nvmet.h"

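/*
 * The four queue management handlers below are only valid for PCI target
 * controllers: fabrics controllers create and delete their queues through
 * the connect command instead, so for them these opcodes fail with an
 * invalid opcode error.
 */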
static void nvmet_execute_delete_sq(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
        u16 status;

        if (!nvmet_is_pci_ctrl(ctrl)) {
                status = nvmet_report_invalid_opcode(req);
                goto complete;
        }

        if (!sqid) {
                status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
                goto complete;
        }

        status = nvmet_check_sqid(ctrl, sqid, false);
        if (status != NVME_SC_SUCCESS)
                goto complete;

        status = ctrl->ops->delete_sq(ctrl, sqid);

complete:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_create_sq(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_command *cmd = req->cmd;
        u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
        u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
        u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
        u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
        u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
        u16 status;

        if (!nvmet_is_pci_ctrl(ctrl)) {
                status = nvmet_report_invalid_opcode(req);
                goto complete;
        }

        if (!sqid) {
                status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
                goto complete;
        }

        status = nvmet_check_sqid(ctrl, sqid, true);
        if (status != NVME_SC_SUCCESS)
                goto complete;

        /*
         * Note: The NVMe specification allows multiple SQs to use the same CQ.
         * However, the target code does not really support that. So for now,
         * prevent this and fail the command if sqid and cqid are different.
         */
        if (!cqid || cqid != sqid) {
                pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid);
                status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR;
                goto complete;
        }

        if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
                status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
                goto complete;
        }

        status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1);

complete:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_delete_cq(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
        u16 status;

        if (!nvmet_is_pci_ctrl(ctrl)) {
                status = nvmet_report_invalid_opcode(req);
                goto complete;
        }

        if (!cqid) {
                status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
                goto complete;
        }

        status = nvmet_check_cqid(ctrl, cqid);
        if (status != NVME_SC_SUCCESS)
                goto complete;

        status = ctrl->ops->delete_cq(ctrl, cqid);

complete:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_create_cq(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_command *cmd = req->cmd;
        u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
        u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
        u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
        u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
        u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
        u16 status;

        if (!nvmet_is_pci_ctrl(ctrl)) {
                status = nvmet_report_invalid_opcode(req);
                goto complete;
        }

        if (!cqid) {
                status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
                goto complete;
        }

        status = nvmet_check_cqid(ctrl, cqid);
        if (status != NVME_SC_SUCCESS)
                goto complete;

        if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
                status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
                goto complete;
        }

        status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
                                      prp1, irq_vector);

complete:
        nvmet_req_complete(req, status);
}

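/*
 * The dword count is split across two 16-bit command fields, NUMDU
 * (upper) and NUMDL (lower), and is 0's based, so the transfer length in
 * bytes is ((NUMDU << 16 | NUMDL) + 1) * sizeof(u32).
 */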
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
        switch (cdw10 & 0xff) {
        case NVME_FEAT_HOST_ID:
                return sizeof(req->sq->ctrl->hostid);
        default:
                return 0;
        }
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
        return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

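/*
 * Return up to NVMET_ERROR_LOG_SLOTS error log entries, newest first:
 * start at the slot derived from the error counter and walk the ring
 * buffer backwards, wrapping around at slot 0.
 */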
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        unsigned long flags;
        off_t offset = 0;
        u64 slot;
        u64 i;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

        for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
                if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot)))
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
                offset += sizeof(struct nvme_error_slot);
        }
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
        nvmet_req_complete(req, 0);
}

static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
{
        struct nvme_supported_log *logs;
        u16 status;

        logs = kzalloc(sizeof(*logs), GFP_KERNEL);
        if (!logs) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
        logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);

        status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
        kfree(logs);
out:
        nvmet_req_complete(req, status);
}

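/*
 * Fill in the SMART / Health I/O statistics for a single namespace from
 * the backing block device's partition statistics.  One data unit is
 * 1000 512-byte sectors, rounded up.
 */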
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads, host_writes, data_units_read, data_units_written;
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                return status;

        /* we don't have the right data for file backed ns */
        if (!req->ns->bdev)
                return NVME_SC_SUCCESS;

        host_reads = part_stat_read(req->ns->bdev, ios[READ]);
        data_units_read =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
        host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
        data_units_written =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;
        unsigned long idx;

        ctrl = req->sq->ctrl;
        nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev, ios[READ]);
                data_units_read += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[READ]), 1000);
                host_writes += part_stat_read(ns->bdev, ios[WRITE]);
                data_units_written += DIV_ROUND_UP(
                        part_stat_read(ns->bdev, sectors[WRITE]), 1000);
        }

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}

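/*
 * Rotational Media Information log.  The log is keyed by the endurance
 * group identifier passed in LSI; since every namespace is exposed as its
 * own endurance group, that value is reused as the nsid to locate the
 * backing device.
 */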
static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
{
        struct nvme_rotational_media_log *log;
        struct gendisk *disk;
        u16 status;

        req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
                                            req->cmd->get_log_page.lsi));
        status = nvmet_req_find_ns(req);
        if (status)
                goto out;

        if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
                status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
                goto out;
        }

        if (req->transfer_len != sizeof(*log)) {
                status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
                goto out;
        }

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        log->endgid = req->cmd->get_log_page.lsi;
        disk = req->ns->bdev->bd_disk;
        if (disk && disk->ia_ranges)
                log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
        else
                log->numa = cpu_to_le16(1);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;
        unsigned long flags;

        if (req->transfer_len != sizeof(*log))
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
                goto out_free_log;

        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                        &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
                                        struct nvme_effects_log *log)
{
        /*
         * For a PCI target controller, advertise support for the SQ/CQ
         * creation and deletion commands.
         */
        if (nvmet_is_pci_ctrl(ctrl)) {
                log->acs[nvme_admin_delete_sq] =
                log->acs[nvme_admin_create_sq] =
                log->acs[nvme_admin_delete_cq] =
                log->acs[nvme_admin_create_cq] =
                        cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
        }

        log->acs[nvme_admin_get_log_page] =
        log->acs[nvme_admin_identify] =
        log->acs[nvme_admin_abort_cmd] =
        log->acs[nvme_admin_set_features] =
        log->acs[nvme_admin_get_features] =
        log->acs[nvme_admin_async_event] =
        log->acs[nvme_admin_keep_alive] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
        log->iocs[nvme_cmd_read] =
        log->iocs[nvme_cmd_flush] =
        log->iocs[nvme_cmd_dsm] =
        log->iocs[nvme_cmd_resv_acquire] =
        log->iocs[nvme_cmd_resv_register] =
        log->iocs[nvme_cmd_resv_release] =
        log->iocs[nvme_cmd_resv_report] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
        log->iocs[nvme_cmd_write] =
        log->iocs[nvme_cmd_write_zeroes] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
        log->iocs[nvme_cmd_zone_append] =
        log->iocs[nvme_cmd_zone_mgmt_send] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
        log->iocs[nvme_cmd_zone_mgmt_recv] =
                cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_effects_log *log;
        u16 status = NVME_SC_SUCCESS;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        switch (req->cmd->get_log_page.csi) {
        case NVME_CSI_NVM:
                nvmet_get_cmd_effects_admin(ctrl, log);
                nvmet_get_cmd_effects_nvm(log);
                break;
        case NVME_CSI_ZNS:
                if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                        status = NVME_SC_INVALID_IO_CMD_SET;
                        goto free;
                }
                nvmet_get_cmd_effects_admin(ctrl, log);
                nvmet_get_cmd_effects_nvm(log);
                nvmet_get_cmd_effects_zns(log);
                break;
        default:
                status = NVME_SC_INVALID_LOG_PAGE;
                goto free;
        }

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

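/*
 * Return the list of namespaces that changed since this log was last
 * read.  If the list overflowed (nr_changed_ns == U32_MAX), only a single
 * entry is returned.  Reading the log clears the list and the namespace
 * attribute AEN bit.
 */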
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_INTERNAL;
        size_t len;

        if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);
        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (!status)
                status = nvmet_zero_sgl(req, len, req->transfer_len - len);
        ctrl->nr_changed_ns = 0;
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        mutex_unlock(&ctrl->lock);
out:
        nvmet_req_complete(req, status);
}

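/*
 * Format a single ANA group descriptor.  If the host set the RGO (Return
 * Groups Only) bit in LSP, the nsid list is omitted and only the group
 * header fields are filled in.
 */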
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
                struct nvme_ana_group_desc *desc)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        u32 count = 0;

        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
                }
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
        return struct_size(desc, nsids, count);
}

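/*
 * Endurance Group Information log.  Only the I/O statistics fields are
 * reported, and only for block device backed namespaces.
 */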
static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
{
        u64 host_reads, host_writes, data_units_read, data_units_written;
        struct nvme_endurance_group_log *log;
        u16 status;

        /*
         * The target driver emulates each endurance group as its own
         * namespace, reusing the nsid as the endurance group identifier.
         */
        req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
                                            req->cmd->get_log_page.lsi));
        status = nvmet_req_find_ns(req);
        if (status)
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        if (!req->ns->bdev)
                goto copy;

        host_reads = part_stat_read(req->ns->bdev, ios[READ]);
        data_units_read =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
        host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
        data_units_written =
                DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

        put_unaligned_le64(host_reads, &log->hrc[0]);
        put_unaligned_le64(data_units_read, &log->dur[0]);
        put_unaligned_le64(host_writes, &log->hwc[0]);
        put_unaligned_le64(data_units_written, &log->duw[0]);
copy:
        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
        kfree(log);
out:
        nvmet_req_complete(req, status);
}

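/*
 * The ANA log is a header followed by one descriptor per enabled ANA
 * group.  Groups whose descriptors no longer fit in the host buffer are
 * still counted, and the header is copied out last, once the total number
 * of groups is known.
 */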
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
        struct nvme_ana_rsp_hdr hdr = { 0, };
        struct nvme_ana_group_desc *desc;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
        size_t len;
        u32 grpid;
        u16 ngrps = 0;
        u16 status;

        status = NVME_SC_INTERNAL;
        desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
                       GFP_KERNEL);
        if (!desc)
                goto out;

        down_read(&nvmet_ana_sem);
        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (!nvmet_ana_group_enabled[grpid])
                        continue;
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
                ngrps++;
        }
        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (nvmet_ana_group_enabled[grpid])
                        ngrps++;
        }

        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
        up_read(&nvmet_ana_sem);

        kfree(desc);

        /* copy the header last once we know the number of groups */
        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
{
        struct nvme_supported_features_log *features;
        u16 status;

        features = kzalloc(sizeof(*features), GFP_KERNEL);
        if (!features) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        features->fis[NVME_FEAT_NUM_QUEUES] =
                cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
        features->fis[NVME_FEAT_KATO] =
                cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
        features->fis[NVME_FEAT_ASYNC_EVENT] =
                cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
        features->fis[NVME_FEAT_HOST_ID] =
                cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
        features->fis[NVME_FEAT_WRITE_PROTECT] =
                cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
        features->fis[NVME_FEAT_RESV_MASK] =
                cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);

        status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
        kfree(features);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
                return;

        switch (req->cmd->get_log_page.lid) {
        case NVME_LOG_SUPPORTED:
                return nvmet_execute_get_supported_log_pages(req);
        case NVME_LOG_ERROR:
                return nvmet_execute_get_log_page_error(req);
        case NVME_LOG_SMART:
                return nvmet_execute_get_log_page_smart(req);
        case NVME_LOG_FW_SLOT:
                /*
                 * We only support a single firmware slot which always is
                 * active, so we can zero out the whole firmware slot log and
                 * still claim to fully implement this mandatory log page.
                 */
                return nvmet_execute_get_log_page_noop(req);
        case NVME_LOG_CHANGED_NS:
                return nvmet_execute_get_log_changed_ns(req);
        case NVME_LOG_CMD_EFFECTS:
                return nvmet_execute_get_log_cmd_effects_ns(req);
        case NVME_LOG_ENDURANCE_GROUP:
                return nvmet_execute_get_log_page_endgrp(req);
        case NVME_LOG_ANA:
                return nvmet_execute_get_log_page_ana(req);
        case NVME_LOG_FEATURES:
                return nvmet_execute_get_log_page_features(req);
        case NVME_LOG_RMI:
                return nvmet_execute_get_log_page_rmi(req);
        case NVME_LOG_RESERVATION:
                return nvmet_execute_get_log_page_resv(req);
        }
        pr_debug("unhandled lid %d on qid %d\n",
               req->cmd->get_log_page.lid, req->sq->qid);
        req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
        nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_subsys *subsys = ctrl->subsys;
        struct nvme_id_ctrl *id;
        u32 cmd_capsule_size, ctratt;
        u16 status = 0;

        if (!subsys->subsys_discovered) {
                mutex_lock(&subsys->lock);
                subsys->subsys_discovered = true;
                mutex_unlock(&subsys->lock);
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        id->vid = cpu_to_le16(subsys->vendor_id);
        id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);

        memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
        memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
                       strlen(subsys->model_number), ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

        put_unaligned_le24(subsys->ieee_oui, id->ieee);

        id->rab = 6;

        if (nvmet_is_disc_subsys(ctrl->subsys))
                id->cntrltype = NVME_CTRL_DISC;
        else
                id->cntrltype = NVME_CTRL_IO;

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
                NVME_CTRL_CMIC_ANA;

        /* Limit MDTS according to transport capability */
        if (ctrl->ops->get_mdts)
                id->mdts = ctrl->ops->get_mdts(ctrl);
        else
                id->mdts = 0;

        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS;
        if (nvmet_is_pci_ctrl(ctrl))
                ctratt |= NVME_CTRL_ATTR_RHII;
        id->ctratt = cpu_to_le32(ctratt);

        id->oacs = 0;

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
        id->npss = 0;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

        id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES |
                        NVME_CTRL_ONCS_RESERVATIONS);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than an LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        /* we always support SGLs */
        id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED);
        if (ctrl->ops->flags & NVMF_KEYED_SGLS)
                id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS);

        strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /*
         * Max command capsule size is sqe + in-capsule data size.
         * Disable in-capsule data for Metadata capable controllers.
         */
        cmd_capsule_size = sizeof(struct nvme_command);
        if (!ctrl->pi_support)
                cmd_capsule_size += req->port->inline_data_size;
        id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        /*
         * Endurance group identifier is 16 bits, so we can't let namespaces
         * overflow that since we reuse the nsid
         */
        BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
        id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);

        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvme_id_ns *id;
        u16 status;

        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* return an all zeroed buffer if we can't find an active namespace */
        status = nvmet_req_find_ns(req);
        if (status) {
                status = 0;
                goto done;
        }

        if (nvmet_ns_revalidate(req->ns)) {
                mutex_lock(&req->ns->subsys->lock);
                nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
                mutex_unlock(&req->ns->subsys->lock);
        }

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze =
                cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
        switch (req->port->ana_state[req->ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        if (req->ns->bdev)
                nvmet_bdev_set_limits(req->ns->bdev, id);

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = NVME_NS_NMIC_SHARED;
        id->anagrpid = cpu_to_le32(req->ns->anagrpid);

        if (req->ns->pr.enable)
                id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
                        NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
                        NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
                        NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
                        NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
                        NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
                        NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;

        /*
         * Since we don't know any better, every namespace is its own endurance
         * group.
         */
        id->endgid = cpu_to_le16(req->ns->nsid);

        memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = req->ns->blksize_shift;

        if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
                id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
                          NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
                          NVME_NS_DPC_PI_TYPE3;
                id->mc = NVME_MC_EXTENDED_LBA;
                id->dps = req->ns->pi_type;
                id->flbas = NVME_NS_FLBAS_META_EXT;
                id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
        }

        if (req->ns->readonly)
                id->nsattr |= NVME_NS_ATTR_RO;
done:
        if (!status)
                status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}

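/*
 * Endurance Group List (CNS 19h): return the identifiers greater than the
 * one in the CNS-specific identifier field, with the number of entries in
 * list[0].  As each namespace doubles as an endurance group, the nsids
 * are returned directly.
 */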
static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
{
        u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        __le16 *list;
        u16 status;
        int i = 1;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
                if (ns->nsid <= min_endgid)
                        continue;

                list[i++] = cpu_to_le16(ns->nsid);
                if (i == buf_size / sizeof(__le16))
                        break;
        }

        list[0] = cpu_to_le16(i - 1);
        status = nvmet_copy_to_sgl(req, 0, list, buf_size);
        kfree(list);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        unsigned long idx;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        /*
         * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid.
         * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
         */
        if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
                goto out;
        }

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
                if (ns->nsid <= min_nsid)
                        continue;
                if (match_css && ns->csi != req->cmd->identify.csi)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}

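/*
 * Append one namespace identification descriptor (the nidt/nidl header
 * followed by the identifier itself) to the response buffer, advancing
 * *off past the bytes written.
 */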
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        off_t off = 0;
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                goto out;

        if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &req->ns->uuid, &off);
                if (status)
                        goto out;
        }
        if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &req->ns->nguid, &off);
                if (status)
                        goto out;
        }

        status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
                                          NVME_NIDT_CSI_LEN,
                                          &req->ns->csi, &off);
        if (status)
                goto out;

        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_STATUS_DNR;

out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
{
        /* Not supported: return zeroes */
        nvmet_req_complete(req,
                   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}

static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
{
        u16 status;
        struct nvme_id_ns_nvm *id;

        status = nvmet_req_find_ns(req);
        if (status)
                goto out;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
{
        struct nvme_id_ns_cs_indep *id;
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                goto out;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        id->nstat = NVME_NSTAT_NRDY;
        id->anagrpid = cpu_to_le32(req->ns->anagrpid);
        id->nmic = NVME_NS_NMIC_SHARED;
        if (req->ns->readonly)
                id->nsattr |= NVME_NS_ATTR_RO;
        if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
                id->nsfeat |= NVME_NS_ROTATIONAL;
        /*
         * We need the flush command to flush the file's metadata, so report
         * vwc support if the backend is a file, even though buffered_io is
         * disabled.
         */
        if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
                id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
out:
        nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
                return;

        switch (req->cmd->identify.cns) {
        case NVME_ID_CNS_NS:
                nvmet_execute_identify_ns(req);
                return;
        case NVME_ID_CNS_CTRL:
                nvmet_execute_identify_ctrl(req);
                return;
        case NVME_ID_CNS_NS_ACTIVE_LIST:
                nvmet_execute_identify_nslist(req, false);
                return;
        case NVME_ID_CNS_NS_DESC_LIST:
                nvmet_execute_identify_desclist(req);
                return;
        case NVME_ID_CNS_CS_NS:
                switch (req->cmd->identify.csi) {
                case NVME_CSI_NVM:
                        nvme_execute_identify_ns_nvm(req);
                        return;
                case NVME_CSI_ZNS:
                        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                                nvmet_execute_identify_ns_zns(req);
                                return;
                        }
                        break;
                }
                break;
        case NVME_ID_CNS_CS_CTRL:
                switch (req->cmd->identify.csi) {
                case NVME_CSI_NVM:
                        nvmet_execute_identify_ctrl_nvm(req);
                        return;
                case NVME_CSI_ZNS:
                        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
                                nvmet_execute_identify_ctrl_zns(req);
                                return;
                        }
                        break;
                }
                break;
        case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
                nvmet_execute_identify_nslist(req, true);
                return;
        case NVME_ID_CNS_NS_CS_INDEP:
                nvmet_execute_id_cs_indep(req);
                return;
        case NVME_ID_CNS_ENDGRP_LIST:
                nvmet_execute_identify_endgrp_list(req);
                return;
        }

        pr_debug("unhandled identify cns %d on qid %d\n",
               req->cmd->identify.cns, req->sq->qid);
        req->error_loc = offsetof(struct nvme_identify, cns);
        nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't even bother waiting for the command to be
 * executed and return immediately, indicating that the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
        if (!nvmet_check_transfer_len(req, 0))
                return;
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}

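/*
 * Flush the backing file or block device so that data written before a
 * namespace transitions to the write protected state is persisted before
 * the transition is reported as successful.
 */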
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
        u16 status;

        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
        return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                return status;

        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
}

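/*
 * KATO is given by the host in milliseconds but kept by the target in
 * seconds, rounded up.  Restart the keep-alive timer so the new value
 * takes effect immediately, and report the applied value back in the
 * completion result.
 */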
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        nvmet_stop_keep_alive_timer(req->sq->ctrl);
        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
        nvmet_start_keep_alive_timer(req->sq->ctrl);

        nvmet_set_result(req, req->sq->ctrl->kato);

        return 0;
}

u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
        nvmet_set_result(req, val32);

        return 0;
}

static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (!nvmet_is_pci_ctrl(ctrl))
                return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;

        /*
         * The NVMe base specification v2.1 recommends supporting 128-bit host
         * IDs (section 5.1.25.1.28.1). However, that same section also says
         * that "The controller may support a 64-bit Host Identifier and/or an
         * extended 128-bit Host Identifier". So simplify this support and do
         * not support 64-bit host IDs to avoid needing to check that all
         * controllers associated with the same subsystem all use the same host
         * ID size.
         */
        if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
        }

        return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
                                   sizeof(req->sq->ctrl->hostid));
}

static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_feat_irq_coalesce irqc = {
                .time = (cdw11 >> 8) & 0xff,
                .thr = cdw11 & 0xff,
        };

        /*
         * This feature is not supported for fabrics controllers and is
         * mandatory for PCI controllers.
         */
        if (!nvmet_is_pci_ctrl(ctrl)) {
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
        }

        return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
}

static u16 nvmet_set_feat_irq_config(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_feat_irq_config irqcfg = {
                .iv = cdw11 & 0xffff,
                .cd = (cdw11 >> 16) & 0x1,
        };

        /*
         * This feature is not supported for fabrics controllers and is
         * mandatory for PCI controllers.
         */
        if (!nvmet_is_pci_ctrl(ctrl)) {
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
        }

        return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
}

static u16 nvmet_set_feat_arbitration(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_feat_arbitration arb = {
                .hpw = (cdw11 >> 24) & 0xff,
                .mpw = (cdw11 >> 16) & 0xff,
                .lpw = (cdw11 >> 8) & 0xff,
                .ab = cdw11 & 0x3,
        };

        if (!ctrl->ops->set_feature) {
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
        }

        return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
}

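/*
 * Note that the number of queues feature is fixed: whatever NSQR/NCQR the
 * host asks for, the result reports max_qid - 1 (0's based) for both
 * submission and completion queues.
 */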
void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
        u16 status = 0;
        u16 nsqr;
        u16 ncqr;

        if (!nvmet_check_data_len_lte(req, 0))
                return;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_ARBITRATION:
                status = nvmet_set_feat_arbitration(req);
                break;
        case NVME_FEAT_NUM_QUEUES:
                ncqr = (cdw11 >> 16) & 0xffff;
                nsqr = cdw11 & 0xffff;
                if (ncqr == 0xffff || nsqr == 0xffff) {
                        status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
                        break;
                }
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_IRQ_COALESCE:
                status = nvmet_set_feat_irq_coalesce(req);
                break;
        case NVME_FEAT_IRQ_CONFIG:
                status = nvmet_set_feat_irq_config(req);
                break;
        case NVME_FEAT_KATO:
                status = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
                break;
        case NVME_FEAT_HOST_ID:
                status = nvmet_set_feat_host_id(req);
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_set_feat_write_protect(req);
                break;
        case NVME_FEAT_RESV_MASK:
                status = nvmet_set_feat_resv_notif_mask(req, cdw11);
                break;
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
        u32 result;

        result = nvmet_req_find_ns(req);
        if (result)
                return result;

        mutex_lock(&subsys->lock);
        if (req->ns->readonly)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);

        return 0;
}

static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_feat_irq_coalesce irqc = { };
        u16 status;

        /*
         * This feature is not supported for fabrics controllers and is
         * mandatory for PCI controllers.
         */
        if (!nvmet_is_pci_ctrl(ctrl)) {
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
        }

        status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
        if (status != NVME_SC_SUCCESS)
                return status;

        nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);

        return NVME_SC_SUCCESS;
}

static u16 nvmet_get_feat_irq_config(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
        struct nvmet_feat_irq_config irqcfg = { .iv = iv };
        u16 status;

        /*
         * This feature is not supported for fabrics controllers and is
         * mandatory for PCI controllers.
         */
        if (!nvmet_is_pci_ctrl(ctrl)) {
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
        }

        status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
        if (status != NVME_SC_SUCCESS)
                return status;

        nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv);

        return NVME_SC_SUCCESS;
}

static u16 nvmet_get_feat_arbitration(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_feat_arbitration arb = { };
        u16 status;

        if (!ctrl->ops->get_feature) {
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
        }

        status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
        if (status != NVME_SC_SUCCESS)
                return status;

        nvmet_set_result(req,
                         ((u32)arb.hpw << 24) |
                         ((u32)arb.mpw << 16) |
                         ((u32)arb.lpw << 8) |
                         (arb.ab & 0x3));

        return NVME_SC_SUCCESS;
}

1497 void nvmet_get_feat_kato(struct nvmet_req *req)
1498 {
1499         nvmet_set_result(req, req->sq->ctrl->kato * 1000);
1500 }
1501
1502 void nvmet_get_feat_async_event(struct nvmet_req *req)
1503 {
1504         nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
1505 }
1506
1507 void nvmet_execute_get_features(struct nvmet_req *req)
1508 {
1509         struct nvmet_subsys *subsys = nvmet_req_subsys(req);
1510         u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
1511         u16 status = 0;
1512
1513         if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
1514                 return;
1515
1516         switch (cdw10 & 0xff) {
1517         /*
1518          * These features are mandatory in the spec, but we don't
1519          * have a useful way to implement them.  We'll eventually
1520          * need to come up with some fake values for these.
1521          */
1522 #if 0
1523         case NVME_FEAT_POWER_MGMT:
1524                 break;
1525         case NVME_FEAT_TEMP_THRESH:
1526                 break;
1527         case NVME_FEAT_ERR_RECOVERY:
1528                 break;
1529         case NVME_FEAT_WRITE_ATOMIC:
1530                 break;
1531 #endif
1532         case NVME_FEAT_ARBITRATION:
1533                 status = nvmet_get_feat_arbitration(req);
1534                 break;
1535         case NVME_FEAT_IRQ_COALESCE:
1536                 status = nvmet_get_feat_irq_coalesce(req);
1537                 break;
1538         case NVME_FEAT_IRQ_CONFIG:
1539                 status = nvmet_get_feat_irq_config(req);
1540                 break;
1541         case NVME_FEAT_ASYNC_EVENT:
1542                 nvmet_get_feat_async_event(req);
1543                 break;
1544         case NVME_FEAT_VOLATILE_WC:
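                /* The target always reports a present, enabled write cache. */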
1545                 nvmet_set_result(req, 1);
1546                 break;
1547         case NVME_FEAT_NUM_QUEUES:
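                /* NSQA (bits 15:00) and NCQA (bits 31:16) are 0's based. */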
1548                 nvmet_set_result(req,
1549                         (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
1550                 break;
1551         case NVME_FEAT_KATO:
1552                 nvmet_get_feat_kato(req);
1553                 break;
1554         case NVME_FEAT_HOST_ID:
1555                 /* the fabrics host ID is 128 bits: the EXHID flag must be set */
1556                 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
1557                         req->error_loc =
1558                                 offsetof(struct nvme_common_command, cdw11);
1559                         status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1560                         break;
1561                 }
1562
1563                 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
1564                                 sizeof(req->sq->ctrl->hostid));
1565                 break;
1566         case NVME_FEAT_WRITE_PROTECT:
1567                 status = nvmet_get_feat_write_protect(req);
1568                 break;
1569         case NVME_FEAT_RESV_MASK:
1570                 status = nvmet_get_feat_resv_notif_mask(req);
1571                 break;
1572         default:
1573                 req->error_loc =
1574                         offsetof(struct nvme_common_command, cdw10);
1575                 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1576                 break;
1577         }
1578
1579         nvmet_req_complete(req, status);
1580 }
1581
1582 void nvmet_execute_async_event(struct nvmet_req *req)
1583 {
1584         struct nvmet_ctrl *ctrl = req->sq->ctrl;
1585
1586         if (!nvmet_check_transfer_len(req, 0))
1587                 return;
1588
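        /*
         * Keep at most NVMET_ASYNC_EVENTS AER commands outstanding per
         * controller; any excess commands are failed immediately.
         */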
1589         mutex_lock(&ctrl->lock);
1590         if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
1591                 mutex_unlock(&ctrl->lock);
1592                 nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
1593                 return;
1594         }
1595         ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
1596         mutex_unlock(&ctrl->lock);
1597
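        /* Complete this AER right away if events are already pending. */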
1598         queue_work(nvmet_wq, &ctrl->async_event_work);
1599 }
1600
1601 void nvmet_execute_keep_alive(struct nvmet_req *req)
1602 {
1603         struct nvmet_ctrl *ctrl = req->sq->ctrl;
1604         u16 status = 0;
1605
1606         if (!nvmet_check_transfer_len(req, 0))
1607                 return;
1608
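        /* A Keep Alive command is invalid if the host disabled it (KATO = 0). */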
1609         if (!ctrl->kato) {
1610                 status = NVME_SC_KA_TIMEOUT_INVALID;
1611                 goto out;
1612         }
1613
1614         pr_debug("ctrl %d update keep-alive timer for %d secs\n",
1615                 ctrl->cntlid, ctrl->kato);
1616         mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
1617 out:
1618         nvmet_req_complete(req, status);
1619 }
1620
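/*
 * Return the expected data transfer length of an admin command, so that
 * transports which must set up buffers before executing a command (e.g.
 * the PCI endpoint target) know how much to allocate and map.
 */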
1621 u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
1622 {
1623         struct nvme_command *cmd = req->cmd;
1624
1625         if (nvme_is_fabrics(cmd))
1626                 return nvmet_fabrics_admin_cmd_data_len(req);
1627         if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1628                 return nvmet_discovery_cmd_data_len(req);
1629
1630         switch (cmd->common.opcode) {
1631         case nvme_admin_get_log_page:
1632                 return nvmet_get_log_page_len(cmd);
1633         case nvme_admin_identify:
1634                 return NVME_IDENTIFY_DATA_SIZE;
1635         case nvme_admin_get_features:
1636                 return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
1637         default:
1638                 return 0;
1639         }
1640 }
1641
1642 u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
1643 {
1644         struct nvme_command *cmd = req->cmd;
1645         u16 ret;
1646
1647         if (nvme_is_fabrics(cmd))
1648                 return nvmet_parse_fabrics_admin_cmd(req);
1649         if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1650                 return nvmet_parse_discovery_cmd(req);
1651
1652         ret = nvmet_check_ctrl_status(req);
1653         if (unlikely(ret))
1654                 return ret;
1655
1656         /* For PCI controllers, admin commands shall not use SGL. */
1657         if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
1658             cmd->common.flags & NVME_CMD_SGL_ALL)
1659                 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1660
1661         if (nvmet_is_passthru_req(req))
1662                 return nvmet_parse_passthru_admin_cmd(req);
1663
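        /*
         * The create/delete SQ/CQ opcodes below are only valid for PCI
         * controllers; their execute handlers fail them with an
         * invalid-opcode status on fabrics controllers.
         */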
1664         switch (cmd->common.opcode) {
1665         case nvme_admin_delete_sq:
1666                 req->execute = nvmet_execute_delete_sq;
1667                 return 0;
1668         case nvme_admin_create_sq:
1669                 req->execute = nvmet_execute_create_sq;
1670                 return 0;
1671         case nvme_admin_get_log_page:
1672                 req->execute = nvmet_execute_get_log_page;
1673                 return 0;
1674         case nvme_admin_delete_cq:
1675                 req->execute = nvmet_execute_delete_cq;
1676                 return 0;
1677         case nvme_admin_create_cq:
1678                 req->execute = nvmet_execute_create_cq;
1679                 return 0;
1680         case nvme_admin_identify:
1681                 req->execute = nvmet_execute_identify;
1682                 return 0;
1683         case nvme_admin_abort_cmd:
1684                 req->execute = nvmet_execute_abort;
1685                 return 0;
1686         case nvme_admin_set_features:
1687                 req->execute = nvmet_execute_set_features;
1688                 return 0;
1689         case nvme_admin_get_features:
1690                 req->execute = nvmet_execute_get_features;
1691                 return 0;
1692         case nvme_admin_async_event:
1693                 req->execute = nvmet_execute_async_event;
1694                 return 0;
1695         case nvme_admin_keep_alive:
1696                 req->execute = nvmet_execute_keep_alive;
1697                 return 0;
1698         default:
1699                 return nvmet_report_invalid_opcode(req);
1700         }
1701 }