// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
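/*
 * Global device tracking: device_mutex serializes updates to devices_idr,
 * which maps each configured se_device's dev_index for iteration via
 * target_for_each_device() below.
 */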
static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
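/*
 * Called from the fabric submission path to translate the LUN field decoded
 * from the wire (se_cmd->orig_fe_lun) into this session's mapped se_lun:
 * look up the se_dev_entry under rcu_read_lock(), pin the LUN via its percpu
 * lun_ref, and fall back to the TPG's virtual LUN 0 so REPORT LUNS succeeds
 * even when no MappedLUN=0 exists for the Initiator Port.
 */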
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();
	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
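/*
 * LUN lookup counterpart for task management requests: pin the mapped LUN
 * as above, then attach the TMR to the backend device's dev_tmr_list.
 */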
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();
	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
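/* Return true if this command's mapped LUN entry is write protected. */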
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}
void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}
/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}
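/*
 * Install a se_dev_entry mapping for mapped_lun on this NodeACL, or convert
 * an existing demo-mode entry in place: the new entry is published via RCU
 * first, then the original is retired only after its pr_kref drains (see
 * target_pr_kref_release() above), so in-flight SPEC_I_PT=1 and
 * REGISTER_AND_MOVE operations complete against a stable entry.
 */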
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}
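/*
 * Reverse of core_enable_device_list_for_node(): unhash the se_dev_entry
 * from the NodeACL and LUN lists, release pending UAs and PR state, and
 * free the entry once RCU readers and PR references have drained.
 */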
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}
/* core_clear_lun_from_tpg():
 *
 * Walk every NodeACL in the TPG and disable any mapping of this LUN.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}
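/*
 * Allocate the next free 16-bit RELATIVE TARGET PORT IDENTIFIER, skipping
 * the reserved value 0h and re-checking dev_sep_list for uniqueness after
 * the counter wraps.
 */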
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
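/*
 * Export a configured se_device as a LUN in a fabric TPG, then map the new
 * LUN into any dynamically generated (demo-mode) NodeACLs.
 */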
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}
/* core_dev_del_lun():
 *
 * Remove an exported LUN from its fabric TPG.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}
int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}
/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}
/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}
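/*
 * Hypothetical usage sketch for target_for_each_device() (not a caller that
 * exists in this file):
 *
 *	static int target_count_devs(struct se_device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *	int ret = target_for_each_device(target_count_devs, &count);
 */

/*
 * Second half of device bring-up: allocate a dev_index in devices_idr, let
 * the backend configure itself, derive attribute defaults from the hardware
 * limits, set up ALUA, and mark the device DF_CONFIGURED on success.
 */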
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
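/*
 * Undo target_configure_device() and target_alloc_device(): drop the
 * dev_index, release ALUA, PR, and VPD state, then return the se_device to
 * the backend driver.
 */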
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}
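/*
 * Create the global virtual LUN 0 on an internal rd_mcp ramdisk HBA, so
 * that transport_lookup_cmd_lun() has a g_lun0_dev backed tpg_virt_lun0 to
 * fall back to when an initiator has no MappedLUN=0.
 */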
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}
/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);