/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

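/*
 * Resolve unpacked_lun to a struct se_lun through the session's node ACL
 * device list, enforce read-only mappings, take a lun_ref so the LUN cannot
 * be shut down while the command is in flight, and account the command
 * against the per-entry and per-device statistics.  When no MappedLUN=0
 * exists for the initiator, virtual LUN 0 is substituted (write-protected)
 * so discovery CDBs such as REPORT LUNS still succeed.
 */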
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

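/*
 * TMR (task management request) variant of the LUN lookup above: resolve
 * unpacked_lun, associate the TMR with its target se_device, and add it
 * to the device's dev_tmr_list.  Returns a plain errno rather than a
 * sense_reason_t.
 */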
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

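/*
 * Tear down every active mapped LUN for a node ACL and free the ACL's
 * device list; called when the node ACL itself is being released.
 */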
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

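/*
 * Flip an existing mapped LUN between read-write and read-only under the
 * ACL's device_list_lock.
 */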
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_enable_device_list_for_node():
 *
 * Set up (or update, for a demo mode -> explicit LUN ACL transition) the
 * struct se_dev_entry mapping mapped_lun to lun for the given node ACL,
 * and link the entry into the port's ALUA list.
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
				" not match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/* core_disable_device_list_for_node():
 *
 * Undo core_enable_device_list_for_node(): unlink the entry from the
 * port's ALUA list, release any Unit Attentions and PR registrations
 * held against it, and clear the mapping.
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list.  This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 * Walk every node ACL in the TPG and disable any mapped LUN entry that
 * still points at the se_lun being removed.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

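/*
 * Allocate a struct se_port and assign it the next free RELATIVE TARGET
 * PORT IDENTIFIER for this se_device (see the spc4r17 table quoted below),
 * skipping the reserved value zero and any identifier already in use.
 */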
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

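/*
 * Publish an allocated se_port: wire it to its TPG and se_lun, add it to
 * the device's port list, and attach it to the default ALUA target port
 * group for backends that support ALUA.
 */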
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
				dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

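/*
 * Export a se_device as a LUN within a target portal group: allocate a
 * se_port, bump the device's export_count, and publish the port.
 */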
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

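/*
 * Round max_sectors down to a multiple of (PAGE_SIZE / block_size).  For
 * example, with 4k pages and 512-byte blocks the alignment is 8 sectors,
 * so a max_sectors of 1029 is rounded down to 1024; with block_size >=
 * PAGE_SIZE the alignment is 1 and the value is returned unchanged.
 */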
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

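/*
 * The se_dev_set_*() helpers below back the per-device attribute files
 * exposed through configfs; each one validates the requested value
 * (rejecting changes that are unsafe while the device is exported) and
 * stores it in dev->dev_attrib.
 */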
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

694 | int se_dev_set_max_write_same_len( |
695 | struct se_device *dev, | |
696 | u32 max_write_same_len) | |
697 | { | |
698 | dev->dev_attrib.max_write_same_len = max_write_same_len; | |
699 | pr_debug("dev[%p]: Set max_write_same_len: %u\n", | |
700 | dev, dev->dev_attrib.max_write_same_len); | |
701 | return 0; | |
702 | } | |
703 | ||
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua_read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	if (dev->transport->get_write_cache) {
		pr_warn("emulate_write_cache cannot be changed when underlying"
			" HW reports WriteCacheEnabled, ignoring request\n");
		return 0;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		 dev, flag);

	return 0;
}

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		dev, flag);

	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	int block_size = dev->dev_attrib.block_size;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	if (!block_size) {
		block_size = 512;
		pr_warn("Defaulting to 512 for zero block_size\n");
	}
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}

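/*
 * Create and activate a new LUN for dev inside a target portal group, and
 * map it into any dynamically generated (demo mode) node ACLs.
 */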
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 * Deactivate and release the LUN identified by unpacked_lun within the
 * given target portal group.
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 * Look up an *active* LUN in the TPG; the helper above instead requires
 * a free (unconfigured) LUN slot.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

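/*
 * Attach an initialized se_lun_acl to an active LUN, downgrading the
 * requested access to read-only when the LUN itself is read-only, and
 * replay any matching APTPL persistent reservation pre-registrations.
 */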
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 * Detach a se_lun_acl from its LUN and disable the corresponding mapped
 * LUN entry in the node ACL.
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

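/*
 * Allocate a backend-specific se_device via the HBA's transport and
 * initialize all generic state: lists, locks, default attributes, and the
 * embedded xcopy_lun used by the EXTENDED_COPY emulation.
 */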
1411 | struct se_device *target_alloc_device(struct se_hba *hba, const char *name) | |
1412 | { | |
1413 | struct se_device *dev; | |
4863e525 | 1414 | struct se_lun *xcopy_lun; |
0fd97ccf CH |
1415 | |
1416 | dev = hba->transport->alloc_device(hba, name); | |
1417 | if (!dev) | |
1418 | return NULL; | |
1419 | ||
0ff87549 | 1420 | dev->dev_link_magic = SE_DEV_LINK_MAGIC; |
0fd97ccf CH |
1421 | dev->se_hba = hba; |
1422 | dev->transport = hba->transport; | |
1423 | ||
1424 | INIT_LIST_HEAD(&dev->dev_list); | |
1425 | INIT_LIST_HEAD(&dev->dev_sep_list); | |
1426 | INIT_LIST_HEAD(&dev->dev_tmr_list); | |
1427 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | |
1428 | INIT_LIST_HEAD(&dev->state_list); | |
1429 | INIT_LIST_HEAD(&dev->qf_cmd_list); | |
d9ea32bf | 1430 | INIT_LIST_HEAD(&dev->g_dev_node); |
0fd97ccf CH |
1431 | spin_lock_init(&dev->execute_task_lock); |
1432 | spin_lock_init(&dev->delayed_cmd_lock); | |
1433 | spin_lock_init(&dev->dev_reservation_lock); | |
1434 | spin_lock_init(&dev->se_port_lock); | |
1435 | spin_lock_init(&dev->se_tmr_lock); | |
1436 | spin_lock_init(&dev->qf_cmd_lock); | |
68ff9b9b | 1437 | sema_init(&dev->caw_sem, 1); |
0fd97ccf CH |
1438 | atomic_set(&dev->dev_ordered_id, 0); |
1439 | INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); | |
1440 | spin_lock_init(&dev->t10_wwn.t10_vpd_lock); | |
1441 | INIT_LIST_HEAD(&dev->t10_pr.registration_list); | |
1442 | INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list); | |
1443 | spin_lock_init(&dev->t10_pr.registration_lock); | |
1444 | spin_lock_init(&dev->t10_pr.aptpl_reg_lock); | |
1445 | INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); | |
1446 | spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); | |
1447 | ||
0fd97ccf CH |
1448 | dev->t10_wwn.t10_dev = dev; |
1449 | dev->t10_alua.t10_dev = dev; | |
1450 | ||
1451 | dev->dev_attrib.da_dev = dev; | |
adfa9570 | 1452 | dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; |
0fd97ccf CH |
1453 | dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO; |
1454 | dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; | |
1455 | dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; | |
1456 | dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; | |
1457 | dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; | |
1458 | dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; | |
1459 | dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; | |
1460 | dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; | |
0123a9ec | 1461 | dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; |
d397a445 | 1462 | dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; |
0fd97ccf CH |
1463 | dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; |
1464 | dev->dev_attrib.is_nonrot = DA_IS_NONROT; | |
1465 | dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; | |
1466 | dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; | |
1467 | dev->dev_attrib.max_unmap_block_desc_count = | |
1468 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; | |
1469 | dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; | |
1470 | dev->dev_attrib.unmap_granularity_alignment = | |
1471 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; | |
773cbaf7 | 1472 | dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; |
0fd97ccf CH |
1473 | dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; |
1474 | dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; | |
1475 | ||
4863e525 NB |
1476 | xcopy_lun = &dev->xcopy_lun; |
1477 | xcopy_lun->lun_se_dev = dev; | |
1478 | init_completion(&xcopy_lun->lun_shutdown_comp); | |
1479 | INIT_LIST_HEAD(&xcopy_lun->lun_acl_list); | |
1480 | spin_lock_init(&xcopy_lun->lun_acl_lock); | |
1481 | spin_lock_init(&xcopy_lun->lun_sep_lock); | |
1482 | init_completion(&xcopy_lun->lun_ref_comp); | |
1483 | ||
0fd97ccf CH |
1484 | return dev; |
1485 | } | |
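target_alloc_device() only fills in the backend-agnostic state; the allocation itself comes from the backend's ->alloc_device() hook. A hedged sketch of such a hook, assuming a hypothetical demo backend that embeds struct se_device in its private structure the way rd_mcp and iblock do:

	/* Hypothetical backend-private device, embedding the generic one. */
	struct demo_dev {
		struct se_device dev;
		u32 demo_flags;
	};

	static struct se_device *demo_alloc_device(struct se_hba *hba, const char *name)
	{
		struct demo_dev *demo_dev;

		demo_dev = kzalloc(sizeof(struct demo_dev), GFP_KERNEL);
		if (!demo_dev)
			return NULL;
		/* target_alloc_device() initializes the embedded se_device. */
		return &demo_dev->dev;
	}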
1486 | ||
1487 | int target_configure_device(struct se_device *dev) | |
1488 | { | |
1489 | struct se_hba *hba = dev->se_hba; | |
1490 | int ret; | |
1491 | ||
1492 | if (dev->dev_flags & DF_CONFIGURED) { | |
1493 | pr_err("se_dev->se_dev_ptr already set for storage" | |
1494 | " object\n"); | |
1495 | return -EEXIST; | |
1496 | } | |
1497 | ||
1498 | ret = dev->transport->configure_device(dev); | |
1499 | if (ret) | |
1500 | goto out; | |
1501 | dev->dev_flags |= DF_CONFIGURED; | |
1502 | ||
1503 | /* | |
1504 | * XXX: there is not much point in having two different values here. | |
1505 | */ | |
1506 | dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size; | |
1507 | dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth; | |
1508 | ||
1509 | /* | |
1510 | * Align max_hw_sectors down to PAGE_SIZE I/O transfers | |
1511 | */ | |
1512 | dev->dev_attrib.hw_max_sectors = | |
1513 | se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, | |
1514 | dev->dev_attrib.hw_block_size); | |
1515 | ||
1516 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | |
1517 | dev->creation_time = get_jiffies_64(); | |
1518 | ||
0fd97ccf CH |
1519 | ret = core_setup_alua(dev); |
1520 | if (ret) | |
1521 | goto out; | |
1522 | ||
1523 | /* | |
1524 | * Start up the struct se_device TMR workqueue | |
1525 | */ | |
1526 | dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, | |
1527 | dev->transport->name); | |
1528 | if (!dev->tmr_wq) { | |
1529 | pr_err("Unable to create tmr workqueue for %s\n", | |
1530 | dev->transport->name); | |
1531 | ret = -ENOMEM; | |
1532 | goto out_free_alua; | |
1533 | } | |
1534 | ||
1535 | /* | |
1536 | * Set up the work queue for QUEUE_FULL handling | |
1537 | */ | |
1538 | INIT_WORK(&dev->qf_work_queue, target_qf_do_work); | |
1539 | ||
1540 | /* | |
1541 | * Preload the initial INQUIRY const values for the virtual | |
1542 | * backends (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | |
1543 | * passthrough, where these values are provided by the backend LLD. | |
1544 | */ | |
1545 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | |
1546 | strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); | |
1547 | strncpy(&dev->t10_wwn.model[0], | |
1548 | dev->transport->inquiry_prod, 16); | |
1549 | strncpy(&dev->t10_wwn.revision[0], | |
1550 | dev->transport->inquiry_rev, 4); | |
1551 | } | |
1552 | ||
1553 | scsi_dump_inquiry(dev); | |
1554 | ||
1555 | spin_lock(&hba->device_lock); | |
1556 | hba->dev_count++; | |
1557 | spin_unlock(&hba->device_lock); | |
d9ea32bf NB |
1558 | |
1559 | mutex_lock(&g_device_mutex); | |
1560 | list_add_tail(&dev->g_dev_node, &g_device_list); | |
1561 | mutex_unlock(&g_device_mutex); | |
1562 | ||
0fd97ccf CH |
1563 | return 0; |
1564 | ||
1565 | out_free_alua: | |
1566 | core_alua_free_lu_gp_mem(dev); | |
1567 | out: | |
1568 | se_release_vpd_for_dev(dev); | |
1569 | return ret; | |
1570 | } | |
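se_dev_align_max_sectors(), defined earlier in this file, trims hw_max_sectors so that each maximum-sized I/O covers a whole number of pages. An illustrative sketch of that arithmetic (not the mainline body): with 512-byte blocks and 4 KiB pages the alignment is 8 sectors, and block sizes above PAGE_SIZE degenerate to an alignment of 1.

	/* Round max_sectors down to a whole number of pages worth of blocks. */
	static u32 align_max_sectors_sketch(u32 max_sectors, u32 block_size)
	{
		u32 alignment = max(1ul, PAGE_SIZE / block_size);

		return rounddown(max_sectors, alignment);
	}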
1571 | ||
1572 | void target_free_device(struct se_device *dev) | |
1573 | { | |
1574 | struct se_hba *hba = dev->se_hba; | |
1575 | ||
1576 | WARN_ON(!list_empty(&dev->dev_sep_list)); | |
1577 | ||
1578 | if (dev->dev_flags & DF_CONFIGURED) { | |
1579 | destroy_workqueue(dev->tmr_wq); | |
1580 | ||
d9ea32bf NB |
1581 | mutex_lock(&g_device_mutex); |
1582 | list_del(&dev->g_dev_node); | |
1583 | mutex_unlock(&g_device_mutex); | |
1584 | ||
0fd97ccf CH |
1585 | spin_lock(&hba->device_lock); |
1586 | hba->dev_count--; | |
1587 | spin_unlock(&hba->device_lock); | |
1588 | } | |
1589 | ||
1590 | core_alua_free_lu_gp_mem(dev); | |
1591 | core_scsi3_free_all_registrations(dev); | |
1592 | se_release_vpd_for_dev(dev); | |
1593 | ||
1594 | dev->transport->free_device(dev); | |
1595 | } | |
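target_free_device() finishes by calling the backend's ->free_device() hook, the counterpart of ->alloc_device(). Continuing the hypothetical demo backend sketched above, the hook recovers its private structure with container_of() and frees it:

	static void demo_free_device(struct se_device *dev)
	{
		struct demo_dev *demo_dev = container_of(dev, struct demo_dev, dev);

		kfree(demo_dev);
	}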
1596 | ||
c66ac9db NB |
1597 | int core_dev_setup_virtual_lun0(void) |
1598 | { | |
1599 | struct se_hba *hba; | |
1600 | struct se_device *dev; | |
db5d1c3c | 1601 | char buf[] = "rd_pages=8,rd_nullio=1"; |
c66ac9db NB |
1602 | int ret; |
1603 | ||
6708bb27 | 1604 | hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); |
c66ac9db NB |
1605 | if (IS_ERR(hba)) |
1606 | return PTR_ERR(hba); | |
1607 | ||
0fd97ccf CH |
1608 | dev = target_alloc_device(hba, "virt_lun0"); |
1609 | if (!dev) { | |
c66ac9db | 1610 | ret = -ENOMEM; |
0fd97ccf | 1611 | goto out_free_hba; |
c66ac9db | 1612 | } |
c66ac9db | 1613 | |
0fd97ccf | 1614 | hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf)); |
c66ac9db | 1615 | |
0fd97ccf CH |
1616 | ret = target_configure_device(dev); |
1617 | if (ret) | |
1618 | goto out_free_se_dev; | |
c66ac9db | 1619 | |
0fd97ccf CH |
1620 | lun0_hba = hba; |
1621 | g_lun0_dev = dev; | |
c66ac9db | 1622 | return 0; |
0fd97ccf CH |
1623 | |
1624 | out_free_se_dev: | |
1625 | target_free_device(dev); | |
1626 | out_free_hba: | |
1627 | core_delete_hba(hba); | |
c66ac9db NB |
1628 | return ret; |
1629 | } | |
1630 | ||
1631 | ||
1632 | void core_dev_release_virtual_lun0(void) | |
1633 | { | |
e3d6f909 | 1634 | struct se_hba *hba = lun0_hba; |
c66ac9db | 1635 | |
6708bb27 | 1636 | if (!hba) |
c66ac9db NB |
1637 | return; |
1638 | ||
e3d6f909 | 1639 | if (g_lun0_dev) |
0fd97ccf | 1640 | target_free_device(g_lun0_dev); |
c66ac9db NB |
1641 | core_delete_hba(hba); |
1642 | } |
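Setup and release of the virtual LUN 0 are strictly paired: setup creates an internal rd_mcp HBA plus an 8-page nullio ramdisk ("rd_pages=8,rd_nullio=1"), and release frees the device before deleting the HBA. A sketch of how a caller (in mainline, the configfs init/exit path in target_core_configfs.c) pairs them; demo_init/demo_exit are illustrative names:

	static int __init demo_init(void)
	{
		int ret;

		ret = core_dev_setup_virtual_lun0();
		if (ret < 0)
			return ret;
		/* ... register the rest of the target core here ... */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		core_dev_release_virtual_lun0();
	}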