// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_hba.c
 *
 * This file contains the TCM HBA Transport related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);

static u32 hba_id_counter;

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
int transport_backend_register(const struct target_backend_ops *ops)
{
	struct target_backend *tb, *old;

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;
	tb->ops = ops;

	mutex_lock(&backend_mutex);
	list_for_each_entry(old, &backend_list, list) {
		if (!strcmp(old->ops->name, ops->name)) {
			pr_err("backend %s already registered.\n", ops->name);
			mutex_unlock(&backend_mutex);
			kfree(tb);
			return -EEXIST;
		}
	}
	target_setup_backend_cits(tb);
	list_add_tail(&tb->list, &backend_list);
	mutex_unlock(&backend_mutex);

	pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
		 ops->name, ops->owner);
	return 0;
}
EXPORT_SYMBOL(transport_backend_register);
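
/*
 * Example (illustrative sketch, not part of this file): a backend driver
 * registers its ops table at module load. The my_* names below are
 * hypothetical stand-ins; real backends include target_core_iblock
 * ("iblock") and target_core_file ("fileio").
 *
 *	static const struct target_backend_ops my_backend_ops = {
 *		.name		= "my_backend",
 *		.owner		= THIS_MODULE,
 *		.attach_hba	= my_attach_hba,
 *		.detach_hba	= my_detach_hba,
 *	};
 *
 *	static int __init my_backend_module_init(void)
 *	{
 *		return transport_backend_register(&my_backend_ops);
 *	}
 */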
void target_backend_unregister(const struct target_backend_ops *ops)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (tb->ops == ops) {
			list_del(&tb->list);
			mutex_unlock(&backend_mutex);
			/*
			 * Wait for any outstanding backend driver ->rcu_head
			 * callbacks to complete post TBO->free_device() ->
			 * call_rcu(), before allowing backend driver module
			 * unload of target_backend_ops->owner to proceed.
			 */
			rcu_barrier();
			kfree(tb);
			return;
		}
	}
	mutex_unlock(&backend_mutex);
}
EXPORT_SYMBOL(target_backend_unregister);
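
/*
 * Example (illustrative sketch): the matching module unload path simply
 * unregisters the same ops table; the rcu_barrier() above ensures no
 * ->free_device() RCU callback can still run module code afterwards.
 *
 *	static void __exit my_backend_module_exit(void)
 *	{
 *		target_backend_unregister(&my_backend_ops);
 *	}
 */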
static struct target_backend *core_get_backend(const char *name)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (!strcmp(tb->ops->name, name))
			goto found;
	}
	mutex_unlock(&backend_mutex);
	return NULL;
found:
	if (tb->ops->owner && !try_module_get(tb->ops->owner))
		tb = NULL;
	mutex_unlock(&backend_mutex);
	return tb;
}
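
/*
 * Note: a successful core_get_backend() holds a reference on the backend
 * module via try_module_get(); the matching module_put() happens in the
 * core_alloc_hba() failure path or, for attached HBAs, in core_delete_hba().
 */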
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		pr_err("Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&hba->device_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;

	hba->backend = core_get_backend(plugin_name);
	if (!hba->backend) {
		ret = -EINVAL;
		goto out_free_hba;
	}

	ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;

	spin_lock(&hba_lock);
	hba->hba_id = hba_id_counter++;
	list_add_tail(&hba->hba_node, &hba_list);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target Core\n",
		 hba->hba_id);

	return hba;

out_module_put:
	module_put(hba->backend->ops->owner);
	hba->backend = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}
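
/*
 * Example (illustrative sketch): callers such as target_core_configfs
 * propagate the ERR_PTR-encoded error; the plugin name and flags below
 * are hypothetical.
 *
 *	struct se_hba *hba;
 *
 *	hba = core_alloc_hba("iblock", 0, 0);
 *	if (IS_ERR(hba))
 *		return PTR_ERR(hba);
 */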
int
core_delete_hba(struct se_hba *hba)
{
	WARN_ON(hba->dev_count);

	hba->backend->ops->detach_hba(hba);

	spin_lock(&hba_lock);
	list_del(&hba->hba_node);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target Core\n",
		 hba->hba_id);

	module_put(hba->backend->ops->owner);

	hba->backend = NULL;
	kfree(hba);
	return 0;
}
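
/*
 * Example (illustrative sketch): configfs teardown calls this as the
 * inverse of core_alloc_hba() once every device on the HBA is gone:
 *
 *	core_delete_hba(hba);
 */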
bool target_sense_desc_format(struct se_device *dev)
{
	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
}
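
/*
 * Note: fixed-format sense data carries its information field in 32 bits,
 * so devices with more than U32_MAX blocks (e.g. larger than 2 TiB with
 * 512-byte blocks) must report descriptor-format sense data instead.
 */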