// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/nvme-auth.h>

#include "nvme.h"
#include "fabrics.h"

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf,
			  ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;

	ctrl->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (nvme_disk_is_ns_head(disk))
		return disk->private_data;
	return nvme_get_ns_from_dev(dev)->head;
}

static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);

	return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;
	head->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
	nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);

static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
	nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

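/*
 * Example (illustrative values): a namespace without UUID/NGUID/EUI-64 falls
 * back to the legacy identifier built from subsystem fields, e.g.
 * "nvme.8086-<serial bytes>-<model bytes>-0000000a" for vendor 0x8086 and
 * NSID 10, with trailing spaces and NUL bytes trimmed from serial and model.
 */
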
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_once(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
}
static DEVICE_ATTR_RO(csi);

static ssize_t metadata_bytes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
}
static DEVICE_ATTR_RO(metadata_bytes);

static int ns_head_update_nuse(struct nvme_ns_head *head)
{
	struct nvme_id_ns *id;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	/* Avoid issuing commands too often by rate limiting the update */
	if (!__ratelimit(&head->rs_nuse))
		return 0;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
	if (ret)
		goto out_unlock;

	head->nuse = le64_to_cpu(id->nuse);
	kfree(id);

out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int ns_update_nuse(struct nvme_ns *ns)
{
	struct nvme_id_ns *id;
	int ret;

	/* Avoid issuing commands too often by rate limiting the update. */
	if (!__ratelimit(&ns->head->rs_nuse))
		return 0;

	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
	if (ret)
		return ret;

	ns->head->nuse = le64_to_cpu(id->nuse);
	kfree(id);
	return 0;
}

static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct gendisk *disk = dev_to_disk(dev);
	int ret;

	if (nvme_disk_is_ns_head(disk))
		ret = ns_head_update_nuse(head);
	else
		ret = ns_update_nuse(disk->private_data);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", head->nuse);
}
static DEVICE_ATTR_RO(nuse);

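/*
 * Note: "nuse" reads are backed by an Identify Namespace command that is rate
 * limited through head->rs_nuse; while the limiter suppresses an update, the
 * previously cached head->nuse value is reported instead.
 */
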
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_csi.attr,
	&dev_attr_nsid.attr,
	&dev_attr_metadata_bytes.attr,
	&dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	&dev_attr_io_passthru_err_log_enabled.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		/* per-path attr */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

const struct attribute_group *nvme_ns_attr_groups[] = {
	&nvme_ns_attr_group,
	NULL,
};

#define nvme_show_str_function(field)						\
static ssize_t field##_show(struct device *dev,					\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t field##_show(struct device *dev,					\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

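/*
 * Example (illustrative): nvme_show_int_function(cntlid) expands to a
 * cntlid_show() handler that prints ctrl->cntlid plus a read-only
 * dev_attr_cntlid, so each invocation above adds one controller attribute.
 */
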
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned state = (unsigned)nvme_ctrl_state(ctrl);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO] = "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if (state < ARRAY_SIZE(state_name) && state_name[state])
		return sysfs_emit(buf, "%s\n", state_name[state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

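/*
 * Example (illustrative): writing a negative ctrl_loss_tmo selects "off"
 * (opts->max_reconnects = -1, i.e. reconnect indefinitely); a non-negative
 * value is rounded up to a whole number of reconnect_delay periods, so a
 * subsequent read may report a slightly larger timeout than was written.
 */
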
static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

#ifdef CONFIG_NVME_HOST_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

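/*
 * Example (illustrative): both DH-HMAC-CHAP secrets must be written in the
 * "DHHC-1:..." transport format; storing a new value swaps the key under
 * dhchap_auth_mutex and queues dhchap_auth_work to re-authenticate the
 * controller.
 */
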
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_HOST_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	&dev_attr_adm_passthru_err_log_enabled.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_HOST_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif

	return a->mode;
}

const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

#ifdef CONFIG_NVME_TCP_TLS
static ssize_t tls_key_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->tls_pskid)
		return 0;
	return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
}
static DEVICE_ATTR_RO(tls_key);

static ssize_t tls_configured_key_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *key = ctrl->opts->tls_key;

	return sysfs_emit(buf, "%08x\n", key_serial(key));
}
static DEVICE_ATTR_RO(tls_configured_key);

static ssize_t tls_keyring_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *keyring = ctrl->opts->keyring;

	return sysfs_emit(buf, "%s\n", keyring->description);
}
static DEVICE_ATTR_RO(tls_keyring);

static struct attribute *nvme_tls_attrs[] = {
	&dev_attr_tls_key.attr,
	&dev_attr_tls_configured_key.attr,
	&dev_attr_tls_keyring.attr,
	NULL,
};

static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
		return 0;

	if (a == &dev_attr_tls_key.attr &&
	    !ctrl->tls_pskid)
		return 0;
	if (a == &dev_attr_tls_configured_key.attr &&
	    !ctrl->opts->tls_key)
		return 0;
	if (a == &dev_attr_tls_keyring.attr &&
	    !ctrl->opts->keyring)
		return 0;

	return a->mode;
}

const struct attribute_group nvme_tls_attrs_group = {
	.attrs		= nvme_tls_attrs,
	.is_visible	= nvme_tls_attrs_are_visible,
};
#endif

const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
#ifdef CONFIG_NVME_TCP_TLS
	&nvme_tls_attrs_group,
#endif
	NULL,
};

#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			(int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};