1 | /******************************************************************************* |
2 | * Filename: target_core_device.c (based on iscsi_target_device.c) | |
3 | * | |
4 | * This file contains the iSCSI Virtual Device and Disk Transport | |
5 | * agnostic related functions. | |
6 | * | |
7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | |
8 | * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. | |
9 | * Copyright (c) 2007-2010 Rising Tide Systems | |
10 | * Copyright (c) 2008-2010 Linux-iSCSI.org | |
11 | * | |
12 | * Nicholas A. Bellinger <[email protected]> | |
13 | * | |
14 | * This program is free software; you can redistribute it and/or modify | |
15 | * it under the terms of the GNU General Public License as published by | |
16 | * the Free Software Foundation; either version 2 of the License, or | |
17 | * (at your option) any later version. | |
18 | * | |
19 | * This program is distributed in the hope that it will be useful, | |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
22 | * GNU General Public License for more details. | |
23 | * | |
24 | * You should have received a copy of the GNU General Public License | |
25 | * along with this program; if not, write to the Free Software | |
26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
27 | * | |
28 | ******************************************************************************/ | |
29 | ||
30 | #include <linux/net.h> | |
31 | #include <linux/string.h> | |
32 | #include <linux/delay.h> | |
33 | #include <linux/timer.h> | |
34 | #include <linux/slab.h> | |
35 | #include <linux/spinlock.h> | |
36 | #include <linux/kthread.h> | |
37 | #include <linux/in.h> | |
38 | #include <net/sock.h> | |
39 | #include <net/tcp.h> | |
40 | #include <scsi/scsi.h> | |
41 | ||
42 | #include <target/target_core_base.h> | |
43 | #include <target/target_core_device.h> | |
44 | #include <target/target_core_tpg.h> | |
45 | #include <target/target_core_transport.h> | |
46 | #include <target/target_core_fabric_ops.h> | |
47 | ||
48 | #include "target_core_alua.h" | |
49 | #include "target_core_hba.h" | |
50 | #include "target_core_pr.h" | |
51 | #include "target_core_ua.h" | |
52 | ||
53 | static void se_dev_start(struct se_device *dev); | |
54 | static void se_dev_stop(struct se_device *dev); | |
55 | ||
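/*
 * transport_get_lun_for_cmd():
 *
 * Map the fabric-supplied unpacked_lun to this session's struct se_dev_entry,
 * reject writes to READ_ONLY MappedLUNs, fall back to the TPG's virtual LUN 0
 * when no MappedLUN=0 exists for the initiator port, update per-entry and
 * per-device statistics, and track the command on the LUN's command list for
 * LUN shutdown handling.
 */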
56 | int transport_get_lun_for_cmd( | |
57 | struct se_cmd *se_cmd, | |
58 | unsigned char *cdb, | |
59 | u32 unpacked_lun) | |
60 | { | |
61 | struct se_dev_entry *deve; | |
62 | struct se_lun *se_lun = NULL; | |
63 | struct se_session *se_sess = SE_SESS(se_cmd); | |
64 | unsigned long flags; | |
65 | int read_only = 0; | |
66 | ||
67 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | |
68 | deve = se_cmd->se_deve = | |
69 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | |
70 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | |
71 | if (se_cmd) { | |
72 | deve->total_cmds++; | |
73 | deve->total_bytes += se_cmd->data_length; | |
74 | ||
75 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | |
76 | if (deve->lun_flags & | |
77 | TRANSPORT_LUNFLAGS_READ_ONLY) { | |
78 | read_only = 1; | |
79 | goto out; | |
80 | } | |
81 | deve->write_bytes += se_cmd->data_length; | |
82 | } else if (se_cmd->data_direction == | |
83 | DMA_FROM_DEVICE) { | |
84 | deve->read_bytes += se_cmd->data_length; | |
85 | } | |
86 | } | |
87 | deve->deve_cmds++; | |
88 | ||
89 | se_lun = se_cmd->se_lun = deve->se_lun; | |
90 | se_cmd->pr_res_key = deve->pr_res_key; | |
91 | se_cmd->orig_fe_lun = unpacked_lun; | |
92 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | |
93 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | |
94 | } | |
95 | out: | |
96 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | |
97 | ||
98 | if (!se_lun) { | |
99 | if (read_only) { | |
100 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
101 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
102 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | |
103 | " Access for 0x%08x\n", | |
104 | CMD_TFO(se_cmd)->get_fabric_name(), | |
105 | unpacked_lun); | |
106 | return -1; | |
107 | } else { | |
108 | /* | |
109 | * Use the se_portal_group->tpg_virt_lun0 to allow for | |
110 | * REPORT_LUNS, et al to be returned when no active | |
111 | * MappedLUN=0 exists for this Initiator Port. | |
112 | */ | |
113 | if (unpacked_lun != 0) { | |
114 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | |
115 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
116 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | |
117 | " Access for 0x%08x\n", | |
118 | CMD_TFO(se_cmd)->get_fabric_name(), | |
119 | unpacked_lun); | |
120 | return -1; | |
121 | } | |
122 | /* | |
123 | * Force WRITE PROTECT for virtual LUN 0 | |
124 | */ | |
125 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && | |
126 | (se_cmd->data_direction != DMA_NONE)) { | |
127 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
128 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
129 | return -1; | |
130 | } | |
131 | #if 0 | |
132 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", | |
133 | CMD_TFO(se_cmd)->get_fabric_name()); | |
134 | #endif | |
135 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; | |
136 | se_cmd->orig_fe_lun = 0; | |
137 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | |
138 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | |
139 | } | |
140 | } | |
141 | /* | |
142 | * Determine if the struct se_lun is online. | |
143 | */ | |
144 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | |
145 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | |
146 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | |
147 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
148 | return -1; | |
149 | } | |
150 | ||
151 | { | |
152 | struct se_device *dev = se_lun->lun_se_dev; | |
153 | spin_lock(&dev->stats_lock); | |
154 | dev->num_cmds++; | |
155 | if (se_cmd->data_direction == DMA_TO_DEVICE) | |
156 | dev->write_bytes += se_cmd->data_length; | |
157 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | |
158 | dev->read_bytes += se_cmd->data_length; | |
159 | spin_unlock(&dev->stats_lock); | |
160 | } | |
161 | ||
162 | /* | |
163 | * Add the struct se_cmd to the struct se_lun's cmd list. This list is used | |
164 | * for tracking state of struct se_cmds during LUN shutdown events. | |
165 | */ | |
166 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); | |
167 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); | |
168 | atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); | |
169 | #if 0 | |
170 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", | |
171 | CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); | |
172 | #endif | |
173 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); | |
174 | ||
175 | return 0; | |
176 | } | |
177 | EXPORT_SYMBOL(transport_get_lun_for_cmd); | |
178 | ||
179 | int transport_get_lun_for_tmr( | |
180 | struct se_cmd *se_cmd, | |
181 | u32 unpacked_lun) | |
182 | { | |
183 | struct se_device *dev = NULL; | |
184 | struct se_dev_entry *deve; | |
185 | struct se_lun *se_lun = NULL; | |
186 | struct se_session *se_sess = SE_SESS(se_cmd); | |
187 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | |
188 | ||
189 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | |
190 | deve = se_cmd->se_deve = | |
191 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | |
192 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | |
193 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; | |
194 | dev = se_tmr->tmr_dev = se_lun->lun_se_dev; | |
195 | se_cmd->pr_res_key = deve->pr_res_key; | |
196 | se_cmd->orig_fe_lun = unpacked_lun; | |
197 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | |
198 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ | |
199 | } | |
200 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | |
201 | ||
202 | if (!se_lun) { | |
203 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | |
204 | " Access for 0x%08x\n", | |
205 | CMD_TFO(se_cmd)->get_fabric_name(), | |
206 | unpacked_lun); | |
207 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
208 | return -1; | |
209 | } | |
210 | /* | |
211 | * Determine if the struct se_lun is online. | |
212 | */ | |
213 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | |
214 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | |
215 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
216 | return -1; | |
217 | } | |
218 | ||
219 | spin_lock(&dev->se_tmr_lock); | |
220 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); | |
221 | spin_unlock(&dev->se_tmr_lock); | |
222 | ||
223 | return 0; | |
224 | } | |
225 | EXPORT_SYMBOL(transport_get_lun_for_tmr); | |
226 | ||
227 | /* | |
228 | * This function is called from core_scsi3_emulate_pro_register_and_move() | |
229 | * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count | |
230 | * when a matching rtpi is found. | |
231 | */ | |
232 | struct se_dev_entry *core_get_se_deve_from_rtpi( | |
233 | struct se_node_acl *nacl, | |
234 | u16 rtpi) | |
235 | { | |
236 | struct se_dev_entry *deve; | |
237 | struct se_lun *lun; | |
238 | struct se_port *port; | |
239 | struct se_portal_group *tpg = nacl->se_tpg; | |
240 | u32 i; | |
241 | ||
242 | spin_lock_irq(&nacl->device_list_lock); | |
243 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | |
244 | deve = &nacl->device_list[i]; | |
245 | ||
246 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | |
247 | continue; | |
248 | ||
249 | lun = deve->se_lun; | |
250 | if (!(lun)) { | |
251 | printk(KERN_ERR "%s device entries device pointer is" | |
252 | " NULL, but Initiator has access.\n", | |
253 | TPG_TFO(tpg)->get_fabric_name()); | |
254 | continue; | |
255 | } | |
256 | port = lun->lun_sep; | |
257 | if (!(port)) { | |
258 | printk(KERN_ERR "%s device entries port pointer is" | |
259 | " NULL, but Initiator has access.\n", | |
260 | TPG_TFO(tpg)->get_fabric_name()); | |
261 | continue; | |
262 | } | |
263 | if (port->sep_rtpi != rtpi) | |
264 | continue; | |
265 | ||
266 | atomic_inc(&deve->pr_ref_count); | |
267 | smp_mb__after_atomic_inc(); | |
268 | spin_unlock_irq(&nacl->device_list_lock); | |
269 | ||
270 | return deve; | |
271 | } | |
272 | spin_unlock_irq(&nacl->device_list_lock); | |
273 | ||
274 | return NULL; | |
275 | } | |
276 | ||
277 | int core_free_device_list_for_node( | |
278 | struct se_node_acl *nacl, | |
279 | struct se_portal_group *tpg) | |
280 | { | |
281 | struct se_dev_entry *deve; | |
282 | struct se_lun *lun; | |
283 | u32 i; | |
284 | ||
285 | if (!nacl->device_list) | |
286 | return 0; | |
287 | ||
288 | spin_lock_irq(&nacl->device_list_lock); | |
289 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | |
290 | deve = &nacl->device_list[i]; | |
291 | ||
292 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | |
293 | continue; | |
294 | ||
295 | if (!deve->se_lun) { | |
296 | printk(KERN_ERR "%s device entries device pointer is" | |
297 | " NULL, but Initiator has access.\n", | |
298 | TPG_TFO(tpg)->get_fabric_name()); | |
299 | continue; | |
300 | } | |
301 | lun = deve->se_lun; | |
302 | ||
303 | spin_unlock_irq(&nacl->device_list_lock); | |
304 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, | |
305 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | |
306 | spin_lock_irq(&nacl->device_list_lock); | |
307 | } | |
308 | spin_unlock_irq(&nacl->device_list_lock); | |
309 | ||
310 | kfree(nacl->device_list); | |
311 | nacl->device_list = NULL; | |
312 | ||
313 | return 0; | |
314 | } | |
315 | ||
316 | void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) | |
317 | { | |
318 | struct se_dev_entry *deve; | |
319 | ||
320 | spin_lock_irq(&se_nacl->device_list_lock); | |
321 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; | |
322 | deve->deve_cmds--; | |
323 | spin_unlock_irq(&se_nacl->device_list_lock); | |
324 | ||
325 | return; | |
326 | } | |
327 | ||
328 | void core_update_device_list_access( | |
329 | u32 mapped_lun, | |
330 | u32 lun_access, | |
331 | struct se_node_acl *nacl) | |
332 | { | |
333 | struct se_dev_entry *deve; | |
334 | ||
335 | spin_lock_irq(&nacl->device_list_lock); | |
336 | deve = &nacl->device_list[mapped_lun]; | |
337 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | |
338 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | |
339 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | |
340 | } else { | |
341 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | |
342 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | |
343 | } | |
344 | spin_unlock_irq(&nacl->device_list_lock); | |
345 | ||
346 | return; | |
347 | } | |
348 | ||
349 | /* core_update_device_list_for_node(): | |
350 | * | |
351 | * Enable or disable a MappedLUN (struct se_dev_entry) for a node ACL. | |
352 | */ | |
353 | int core_update_device_list_for_node( | |
354 | struct se_lun *lun, | |
355 | struct se_lun_acl *lun_acl, | |
356 | u32 mapped_lun, | |
357 | u32 lun_access, | |
358 | struct se_node_acl *nacl, | |
359 | struct se_portal_group *tpg, | |
360 | int enable) | |
361 | { | |
362 | struct se_port *port = lun->lun_sep; | |
363 | struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; | |
364 | int trans = 0; | |
365 | /* | |
366 | * If the MappedLUN entry is being disabled, the entry in | |
367 | * port->sep_alua_list must be removed now before clearing the | |
368 | * struct se_dev_entry pointers below as logic in | |
369 | * core_alua_do_transition_tg_pt() depends on these being present. | |
370 | */ | |
371 | if (!(enable)) { | |
372 | /* | |
373 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | |
374 | * that have not been explicitly converted to MappedLUNs -> | |
375 | * struct se_lun_acl, but we remove deve->alua_port_list from | |
376 | * port->sep_alua_list. This also means that active UAs and | |
377 | * NodeACL context specific PR metadata for demo-mode | |
378 | * MappedLUN *deve will be released below. | |
379 | */ | |
380 | spin_lock_bh(&port->sep_alua_lock); | |
381 | list_del(&deve->alua_port_list); | |
382 | spin_unlock_bh(&port->sep_alua_lock); | |
383 | } | |
384 | ||
385 | spin_lock_irq(&nacl->device_list_lock); | |
386 | if (enable) { | |
387 | /* | |
388 | * Check if the call is handling demo mode -> explicit LUN ACL | |
389 | * transition. This transition must be for the same struct se_lun | |
390 | * + mapped_lun that was setup in demo mode.. | |
391 | */ | |
392 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | |
393 | if (deve->se_lun_acl != NULL) { | |
394 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" | |
395 | " already set for demo mode -> explict" | |
396 | " LUN ACL transition\n"); | |
85dc98d9 | 397 | spin_unlock_irq(&nacl->device_list_lock); |
c66ac9db NB |
398 | return -1; |
399 | } | |
400 | if (deve->se_lun != lun) { | |
401 | printk(KERN_ERR "struct se_dev_entry->se_lun does not" | |
402 | " match passed struct se_lun for demo mode" | |
403 | " -> explicit LUN ACL transition\n"); | |
404 | spin_unlock_irq(&nacl->device_list_lock); | |
405 | return -1; | |
406 | } | |
407 | deve->se_lun_acl = lun_acl; | |
408 | trans = 1; | |
409 | } else { | |
410 | deve->se_lun = lun; | |
411 | deve->se_lun_acl = lun_acl; | |
412 | deve->mapped_lun = mapped_lun; | |
413 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; | |
414 | } | |
415 | ||
416 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | |
417 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | |
418 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | |
419 | } else { | |
420 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | |
421 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | |
422 | } | |
423 | ||
424 | if (trans) { | |
425 | spin_unlock_irq(&nacl->device_list_lock); | |
426 | return 0; | |
427 | } | |
428 | deve->creation_time = get_jiffies_64(); | |
429 | deve->attach_count++; | |
430 | spin_unlock_irq(&nacl->device_list_lock); | |
431 | ||
432 | spin_lock_bh(&port->sep_alua_lock); | |
433 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); | |
434 | spin_unlock_bh(&port->sep_alua_lock); | |
435 | ||
436 | return 0; | |
437 | } | |
438 | /* | |
439 | * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE | |
440 | * PR operation to complete. | |
441 | */ | |
442 | spin_unlock_irq(&nacl->device_list_lock); | |
443 | while (atomic_read(&deve->pr_ref_count) != 0) | |
444 | cpu_relax(); | |
445 | spin_lock_irq(&nacl->device_list_lock); | |
446 | /* | |
447 | * Disable struct se_dev_entry LUN ACL mapping | |
448 | */ | |
449 | core_scsi3_ua_release_all(deve); | |
450 | deve->se_lun = NULL; | |
451 | deve->se_lun_acl = NULL; | |
452 | deve->lun_flags = 0; | |
453 | deve->creation_time = 0; | |
454 | deve->attach_count--; | |
455 | spin_unlock_irq(&nacl->device_list_lock); | |
456 | ||
457 | core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); | |
458 | return 0; | |
459 | } | |
460 | ||
465 | /* core_clear_lun_from_tpg(): | |
466 | * | |
467 | * Drop every node ACL MappedLUN entry that references this struct se_lun. | |
468 | */ | |
465 | void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | |
466 | { | |
467 | struct se_node_acl *nacl; | |
468 | struct se_dev_entry *deve; | |
469 | u32 i; | |
470 | ||
471 | spin_lock_bh(&tpg->acl_node_lock); | |
472 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { | |
473 | spin_unlock_bh(&tpg->acl_node_lock); | |
474 | ||
475 | spin_lock_irq(&nacl->device_list_lock); | |
476 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | |
477 | deve = &nacl->device_list[i]; | |
478 | if (lun != deve->se_lun) | |
479 | continue; | |
480 | spin_unlock_irq(&nacl->device_list_lock); | |
481 | ||
482 | core_update_device_list_for_node(lun, NULL, | |
483 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, | |
484 | nacl, tpg, 0); | |
485 | ||
486 | spin_lock_irq(&nacl->device_list_lock); | |
487 | } | |
488 | spin_unlock_irq(&nacl->device_list_lock); | |
489 | ||
490 | spin_lock_bh(&tpg->acl_node_lock); | |
491 | } | |
492 | spin_unlock_bh(&tpg->acl_node_lock); | |
493 | ||
494 | return; | |
495 | } | |
496 | ||
497 | static struct se_port *core_alloc_port(struct se_device *dev) | |
498 | { | |
499 | struct se_port *port, *port_tmp; | |
500 | ||
501 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); | |
502 | if (!(port)) { | |
503 | printk(KERN_ERR "Unable to allocate struct se_port\n"); | |
504 | return NULL; | |
505 | } | |
506 | INIT_LIST_HEAD(&port->sep_alua_list); | |
507 | INIT_LIST_HEAD(&port->sep_list); | |
508 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); | |
509 | spin_lock_init(&port->sep_alua_lock); | |
510 | mutex_init(&port->sep_tg_pt_md_mutex); | |
511 | ||
512 | spin_lock(&dev->se_port_lock); | |
513 | if (dev->dev_port_count == 0x0000ffff) { | |
514 | printk(KERN_WARNING "Reached dev->dev_port_count ==" | |
515 | " 0x0000ffff\n"); | |
516 | spin_unlock(&dev->se_port_lock); | |
517 | return NULL; | |
518 | } | |
519 | again: | |
520 | /* | |
521 | * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device | |
522 | * Here is the table from spc4r17 section 7.7.3.8. | |
523 | * | |
524 | * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field | |
525 | * | |
526 | * Code Description | |
527 | * 0h Reserved | |
528 | * 1h Relative port 1, historically known as port A | |
529 | * 2h Relative port 2, historically known as port B | |
530 | * 3h to FFFFh Relative port 3 through 65 535 | |
531 | */ | |
532 | port->sep_rtpi = dev->dev_rpti_counter++; | |
533 | if (!(port->sep_rtpi)) | |
534 | goto again; | |
535 | ||
536 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { | |
537 | /* | |
538 | * Make sure RELATIVE TARGET PORT IDENTIFIER is unique | |
539 | * for 16-bit wrap. | |
540 | */ | |
541 | if (port->sep_rtpi == port_tmp->sep_rtpi) | |
542 | goto again; | |
543 | } | |
544 | spin_unlock(&dev->se_port_lock); | |
545 | ||
546 | return port; | |
547 | } | |
548 | ||
549 | static void core_export_port( | |
550 | struct se_device *dev, | |
551 | struct se_portal_group *tpg, | |
552 | struct se_port *port, | |
553 | struct se_lun *lun) | |
554 | { | |
555 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | |
556 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; | |
557 | ||
558 | spin_lock(&dev->se_port_lock); | |
559 | spin_lock(&lun->lun_sep_lock); | |
560 | port->sep_tpg = tpg; | |
561 | port->sep_lun = lun; | |
562 | lun->lun_sep = port; | |
563 | spin_unlock(&lun->lun_sep_lock); | |
564 | ||
565 | list_add_tail(&port->sep_list, &dev->dev_sep_list); | |
566 | spin_unlock(&dev->se_port_lock); | |
567 | ||
568 | if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { | |
569 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); | |
570 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { | |
571 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" | |
572 | "_gp_member_t\n"); | |
573 | return; | |
574 | } | |
575 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | |
576 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | |
577 | T10_ALUA(su_dev)->default_tg_pt_gp); | |
578 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | |
579 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" | |
580 | " Group: alua/default_tg_pt_gp\n", | |
581 | TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); | |
582 | } | |
583 | ||
584 | dev->dev_port_count++; | |
585 | port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */ | |
586 | } | |
587 | ||
588 | /* | |
589 | * Called with struct se_device->se_port_lock spinlock held. | |
590 | */ | |
591 | static void core_release_port(struct se_device *dev, struct se_port *port) | |
592 | { | |
593 | /* | |
594 | * Wait for any port reference for PR ALL_TG_PT=1 operation | |
595 | * to complete in __core_scsi3_alloc_registration() | |
596 | */ | |
597 | spin_unlock(&dev->se_port_lock); | |
598 | if (atomic_read(&port->sep_tg_pt_ref_cnt)) | |
599 | cpu_relax(); | |
600 | spin_lock(&dev->se_port_lock); | |
601 | ||
602 | core_alua_free_tg_pt_gp_mem(port); | |
603 | ||
604 | list_del(&port->sep_list); | |
605 | dev->dev_port_count--; | |
606 | kfree(port); | |
607 | ||
608 | return; | |
609 | } | |
610 | ||
611 | int core_dev_export( | |
612 | struct se_device *dev, | |
613 | struct se_portal_group *tpg, | |
614 | struct se_lun *lun) | |
615 | { | |
616 | struct se_port *port; | |
617 | ||
618 | port = core_alloc_port(dev); | |
619 | if (!(port)) | |
620 | return -1; | |
621 | ||
622 | lun->lun_se_dev = dev; | |
623 | se_dev_start(dev); | |
624 | ||
625 | atomic_inc(&dev->dev_export_obj.obj_access_count); | |
626 | core_export_port(dev, tpg, port, lun); | |
627 | return 0; | |
628 | } | |
629 | ||
630 | void core_dev_unexport( | |
631 | struct se_device *dev, | |
632 | struct se_portal_group *tpg, | |
633 | struct se_lun *lun) | |
634 | { | |
635 | struct se_port *port = lun->lun_sep; | |
636 | ||
637 | spin_lock(&lun->lun_sep_lock); | |
638 | if (lun->lun_se_dev == NULL) { | |
639 | spin_unlock(&lun->lun_sep_lock); | |
640 | return; | |
641 | } | |
642 | spin_unlock(&lun->lun_sep_lock); | |
643 | ||
644 | spin_lock(&dev->se_port_lock); | |
645 | atomic_dec(&dev->dev_export_obj.obj_access_count); | |
646 | core_release_port(dev, port); | |
647 | spin_unlock(&dev->se_port_lock); | |
648 | ||
649 | se_dev_stop(dev); | |
650 | lun->lun_se_dev = NULL; | |
651 | } | |
652 | ||
653 | int transport_core_report_lun_response(struct se_cmd *se_cmd) | |
654 | { | |
655 | struct se_dev_entry *deve; | |
656 | struct se_lun *se_lun; | |
657 | struct se_session *se_sess = SE_SESS(se_cmd); | |
658 | struct se_task *se_task; | |
659 | unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; | |
660 | u32 cdb_offset = 0, lun_count = 0, offset = 8; | |
661 | u64 i, lun; | |
662 | ||
663 | list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) | |
664 | break; | |
665 | ||
666 | if (!(se_task)) { | |
667 | printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); | |
668 | return PYX_TRANSPORT_LU_COMM_FAILURE; | |
669 | } | |
670 | ||
671 | /* | |
672 | * If no struct se_session pointer is present, this struct se_cmd is | |
673 | * coming via a target_core_mod PASSTHROUGH op, and not through | |
674 | * a $FABRIC_MOD. In that case, report LUN=0 only. | |
675 | */ | |
676 | if (!(se_sess)) { | |
677 | lun = 0; | |
678 | buf[offset++] = ((lun >> 56) & 0xff); | |
679 | buf[offset++] = ((lun >> 48) & 0xff); | |
680 | buf[offset++] = ((lun >> 40) & 0xff); | |
681 | buf[offset++] = ((lun >> 32) & 0xff); | |
682 | buf[offset++] = ((lun >> 24) & 0xff); | |
683 | buf[offset++] = ((lun >> 16) & 0xff); | |
684 | buf[offset++] = ((lun >> 8) & 0xff); | |
685 | buf[offset++] = (lun & 0xff); | |
686 | lun_count = 1; | |
687 | goto done; | |
688 | } | |
689 | ||
690 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | |
691 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | |
692 | deve = &SE_NODE_ACL(se_sess)->device_list[i]; | |
693 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | |
694 | continue; | |
695 | se_lun = deve->se_lun; | |
696 | /* | |
697 | * We determine the correct LUN LIST LENGTH even once we | |
698 | * have reached the initial allocation length. | |
699 | * See SPC2-R20 7.19. | |
700 | */ | |
701 | lun_count++; | |
702 | if ((cdb_offset + 8) >= se_cmd->data_length) | |
703 | continue; | |
704 | ||
705 | lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun)); | |
706 | buf[offset++] = ((lun >> 56) & 0xff); | |
707 | buf[offset++] = ((lun >> 48) & 0xff); | |
708 | buf[offset++] = ((lun >> 40) & 0xff); | |
709 | buf[offset++] = ((lun >> 32) & 0xff); | |
710 | buf[offset++] = ((lun >> 24) & 0xff); | |
711 | buf[offset++] = ((lun >> 16) & 0xff); | |
712 | buf[offset++] = ((lun >> 8) & 0xff); | |
713 | buf[offset++] = (lun & 0xff); | |
714 | cdb_offset += 8; | |
715 | } | |
716 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | |
717 | ||
718 | /* | |
719 | * LUN LIST LENGTH = number of reported LUNs * 8; see SPC3 r07, page 159. | |
720 | */ | |
721 | done: | |
722 | lun_count *= 8; | |
723 | buf[0] = ((lun_count >> 24) & 0xff); | |
724 | buf[1] = ((lun_count >> 16) & 0xff); | |
725 | buf[2] = ((lun_count >> 8) & 0xff); | |
726 | buf[3] = (lun_count & 0xff); | |
727 | ||
728 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | |
729 | } | |
730 | ||
731 | /* se_release_device_for_hba(): | |
732 | * | |
733 | * Stop the device and its process thread, drop it from the HBA list, and | |
734 | * free its PR registrations, VPD data and queue objects. | |
735 | */ | |
735 | void se_release_device_for_hba(struct se_device *dev) | |
736 | { | |
737 | struct se_hba *hba = dev->se_hba; | |
738 | ||
739 | if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | |
740 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || | |
741 | (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || | |
742 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || | |
743 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) | |
744 | se_dev_stop(dev); | |
745 | ||
746 | if (dev->dev_ptr) { | |
747 | kthread_stop(dev->process_thread); | |
748 | if (dev->transport->free_device) | |
749 | dev->transport->free_device(dev->dev_ptr); | |
750 | } | |
751 | ||
752 | spin_lock(&hba->device_lock); | |
753 | list_del(&dev->dev_list); | |
754 | hba->dev_count--; | |
755 | spin_unlock(&hba->device_lock); | |
756 | ||
757 | core_scsi3_free_all_registrations(dev); | |
758 | se_release_vpd_for_dev(dev); | |
759 | ||
760 | kfree(dev->dev_status_queue_obj); | |
761 | kfree(dev->dev_queue_obj); | |
762 | kfree(dev); | |
763 | ||
764 | return; | |
765 | } | |
766 | ||
767 | void se_release_vpd_for_dev(struct se_device *dev) | |
768 | { | |
769 | struct t10_vpd *vpd, *vpd_tmp; | |
770 | ||
771 | spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); | |
772 | list_for_each_entry_safe(vpd, vpd_tmp, | |
773 | &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { | |
774 | list_del(&vpd->vpd_list); | |
775 | kfree(vpd); | |
776 | } | |
777 | spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); | |
778 | ||
779 | return; | |
780 | } | |
781 | ||
782 | /* | |
783 | * Called with struct se_hba->device_lock held. | |
784 | */ | |
785 | void se_clear_dev_ports(struct se_device *dev) | |
786 | { | |
787 | struct se_hba *hba = dev->se_hba; | |
788 | struct se_lun *lun; | |
789 | struct se_portal_group *tpg; | |
790 | struct se_port *sep, *sep_tmp; | |
791 | ||
792 | spin_lock(&dev->se_port_lock); | |
793 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | |
794 | spin_unlock(&dev->se_port_lock); | |
795 | spin_unlock(&hba->device_lock); | |
796 | ||
797 | lun = sep->sep_lun; | |
798 | tpg = sep->sep_tpg; | |
799 | spin_lock(&lun->lun_sep_lock); | |
800 | if (lun->lun_se_dev == NULL) { | |
801 | spin_unlock(&lun->lun_sep_lock); | |
802 | continue; | |
803 | } | |
804 | spin_unlock(&lun->lun_sep_lock); | |
805 | ||
806 | core_dev_del_lun(tpg, lun->unpacked_lun); | |
807 | ||
808 | spin_lock(&hba->device_lock); | |
809 | spin_lock(&dev->se_port_lock); | |
810 | } | |
811 | spin_unlock(&dev->se_port_lock); | |
812 | ||
813 | return; | |
814 | } | |
815 | ||
816 | /* se_free_virtual_device(): | |
817 | * | |
818 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. | |
819 | */ | |
820 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) | |
821 | { | |
822 | spin_lock(&hba->device_lock); | |
823 | se_clear_dev_ports(dev); | |
824 | spin_unlock(&hba->device_lock); | |
825 | ||
826 | core_alua_free_lu_gp_mem(dev); | |
827 | se_release_device_for_hba(dev); | |
828 | ||
829 | return 0; | |
830 | } | |
831 | ||
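/*
 * se_dev_start() / se_dev_stop():
 *
 * Bump or drop dev->dev_obj.obj_access_count under hba->device_lock and move
 * the device between the (OFFLINE_)DEACTIVATED and (OFFLINE_)ACTIVATED states
 * on the first reference and the last release.
 */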
832 | static void se_dev_start(struct se_device *dev) | |
833 | { | |
834 | struct se_hba *hba = dev->se_hba; | |
835 | ||
836 | spin_lock(&hba->device_lock); | |
837 | atomic_inc(&dev->dev_obj.obj_access_count); | |
838 | if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { | |
839 | if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { | |
840 | dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; | |
841 | dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; | |
842 | } else if (dev->dev_status & | |
843 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { | |
844 | dev->dev_status &= | |
845 | ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; | |
846 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; | |
847 | } | |
848 | } | |
849 | spin_unlock(&hba->device_lock); | |
850 | } | |
851 | ||
852 | static void se_dev_stop(struct se_device *dev) | |
853 | { | |
854 | struct se_hba *hba = dev->se_hba; | |
855 | ||
856 | spin_lock(&hba->device_lock); | |
857 | atomic_dec(&dev->dev_obj.obj_access_count); | |
858 | if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { | |
859 | if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { | |
860 | dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; | |
861 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | |
862 | } else if (dev->dev_status & | |
863 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { | |
864 | dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; | |
865 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; | |
866 | } | |
867 | } | |
868 | spin_unlock(&hba->device_lock); | |
869 | } | |
870 | ||
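/*
 * se_dev_check_online():
 *
 * Returns 0 while the device is ACTIVATED or DEACTIVATED (i.e. usable for
 * LUN access), and 1 otherwise.
 */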
871 | int se_dev_check_online(struct se_device *dev) | |
872 | { | |
873 | int ret; | |
874 | ||
875 | spin_lock_irq(&dev->dev_status_lock); | |
876 | ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | |
877 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; | |
878 | spin_unlock_irq(&dev->dev_status_lock); | |
879 | ||
880 | return ret; | |
881 | } | |
882 | ||
883 | int se_dev_check_shutdown(struct se_device *dev) | |
884 | { | |
885 | int ret; | |
886 | ||
887 | spin_lock_irq(&dev->dev_status_lock); | |
888 | ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); | |
889 | spin_unlock_irq(&dev->dev_status_lock); | |
890 | ||
891 | return ret; | |
892 | } | |
893 | ||
894 | void se_dev_set_default_attribs( | |
895 | struct se_device *dev, | |
896 | struct se_dev_limits *dev_limits) | |
897 | { | |
898 | struct queue_limits *limits = &dev_limits->limits; | |
899 | ||
900 | DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; | |
901 | DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; | |
902 | DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; | |
903 | DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; | |
904 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; | |
905 | DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; | |
906 | DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; | |
907 | DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; | |
908 | DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; | |
909 | DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; | |
910 | DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; | |
911 | /* | |
912 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK | |
913 | * iblock_create_virtdevice() from struct queue_limits values | |
914 | * if blk_queue_discard()==1 | |
915 | */ | |
916 | DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; | |
917 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = | |
918 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; | |
919 | DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; | |
920 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | |
921 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; | |
922 | /* | |
923 | * block_size is based on subsystem plugin dependent requirements. | |
924 | */ | |
925 | DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; | |
926 | DEV_ATTRIB(dev)->block_size = limits->logical_block_size; | |
927 | /* | |
928 | * max_sectors is based on subsystem plugin dependent requirements. | |
929 | */ | |
930 | DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; | |
931 | DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; | |
932 | /* | |
933 | * Set optimal_sectors from max_sectors, which can be lowered via | |
934 | * configfs. | |
935 | */ | |
936 | DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; | |
937 | /* | |
938 | * queue_depth is based on subsystem plugin dependent requirements. | |
939 | */ | |
940 | DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; | |
941 | DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; | |
942 | } | |
943 | ||
944 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) | |
945 | { | |
946 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { | |
947 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than" | |
948 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); | |
949 | return -1; | |
950 | } else { | |
951 | DEV_ATTRIB(dev)->task_timeout = task_timeout; | |
952 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", | |
953 | dev, task_timeout); | |
954 | } | |
955 | ||
956 | return 0; | |
957 | } | |
958 | ||
959 | int se_dev_set_max_unmap_lba_count( | |
960 | struct se_device *dev, | |
961 | u32 max_unmap_lba_count) | |
962 | { | |
963 | DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; | |
964 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", | |
965 | dev, DEV_ATTRIB(dev)->max_unmap_lba_count); | |
966 | return 0; | |
967 | } | |
968 | ||
969 | int se_dev_set_max_unmap_block_desc_count( | |
970 | struct se_device *dev, | |
971 | u32 max_unmap_block_desc_count) | |
972 | { | |
973 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; | |
974 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", | |
975 | dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); | |
976 | return 0; | |
977 | } | |
978 | ||
979 | int se_dev_set_unmap_granularity( | |
980 | struct se_device *dev, | |
981 | u32 unmap_granularity) | |
982 | { | |
983 | DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; | |
984 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", | |
985 | dev, DEV_ATTRIB(dev)->unmap_granularity); | |
986 | return 0; | |
987 | } | |
988 | ||
989 | int se_dev_set_unmap_granularity_alignment( | |
990 | struct se_device *dev, | |
991 | u32 unmap_granularity_alignment) | |
992 | { | |
993 | DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; | |
994 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", | |
995 | dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); | |
996 | return 0; | |
997 | } | |
998 | ||
999 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | |
1000 | { | |
1001 | if ((flag != 0) && (flag != 1)) { | |
1002 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1003 | return -1; | |
1004 | } | |
1005 | if (TRANSPORT(dev)->dpo_emulated == NULL) { | |
1006 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); | |
1007 | return -1; | |
1008 | } | |
1009 | if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { | |
1010 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); | |
1011 | return -1; | |
1012 | } | |
1013 | DEV_ATTRIB(dev)->emulate_dpo = flag; | |
1014 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" | |
1015 | " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); | |
1016 | return 0; | |
1017 | } | |
1018 | ||
1019 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | |
1020 | { | |
1021 | if ((flag != 0) && (flag != 1)) { | |
1022 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1023 | return -1; | |
1024 | } | |
1025 | if (TRANSPORT(dev)->fua_write_emulated == NULL) { | |
1026 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); | |
1027 | return -1; | |
1028 | } | |
1029 | if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { | |
1030 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); | |
1031 | return -1; | |
1032 | } | |
1033 | DEV_ATTRIB(dev)->emulate_fua_write = flag; | |
1034 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | |
1035 | dev, DEV_ATTRIB(dev)->emulate_fua_write); | |
1036 | return 0; | |
1037 | } | |
1038 | ||
1039 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | |
1040 | { | |
1041 | if ((flag != 0) && (flag != 1)) { | |
1042 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1043 | return -1; | |
1044 | } | |
1045 | if (TRANSPORT(dev)->fua_read_emulated == NULL) { | |
1046 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); | |
1047 | return -1; | |
1048 | } | |
1049 | if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { | |
1050 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); | |
1051 | return -1; | |
1052 | } | |
1053 | DEV_ATTRIB(dev)->emulate_fua_read = flag; | |
1054 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", | |
1055 | dev, DEV_ATTRIB(dev)->emulate_fua_read); | |
1056 | return 0; | |
1057 | } | |
1058 | ||
1059 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | |
1060 | { | |
1061 | if ((flag != 0) && (flag != 1)) { | |
1062 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1063 | return -1; | |
1064 | } | |
1065 | if (TRANSPORT(dev)->write_cache_emulated == NULL) { | |
1066 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); | |
1067 | return -1; | |
1068 | } | |
1069 | if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { | |
1070 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); | |
1071 | return -1; | |
1072 | } | |
1073 | DEV_ATTRIB(dev)->emulate_write_cache = flag; | |
1074 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | |
1075 | dev, DEV_ATTRIB(dev)->emulate_write_cache); | |
1076 | return 0; | |
1077 | } | |
1078 | ||
1079 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | |
1080 | { | |
1081 | if ((flag != 0) && (flag != 1) && (flag != 2)) { | |
1082 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1083 | return -1; | |
1084 | } | |
1085 | ||
1086 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | |
1087 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | |
1088 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" | |
1089 | " exists\n", dev, | |
1090 | atomic_read(&dev->dev_export_obj.obj_access_count)); | |
1091 | return -1; | |
1092 | } | |
1093 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; | |
1094 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", | |
1095 | dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); | |
1096 | ||
1097 | return 0; | |
1098 | } | |
1099 | ||
1100 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) | |
1101 | { | |
1102 | if ((flag != 0) && (flag != 1)) { | |
1103 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1104 | return -1; | |
1105 | } | |
1106 | ||
1107 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | |
1108 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" | |
1109 | " dev_export_obj: %d count exists\n", dev, | |
1110 | atomic_read(&dev->dev_export_obj.obj_access_count)); | |
1111 | return -1; | |
1112 | } | |
1113 | DEV_ATTRIB(dev)->emulate_tas = flag; | |
1114 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", | |
1115 | dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); | |
1116 | ||
1117 | return 0; | |
1118 | } | |
1119 | ||
1120 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | |
1121 | { | |
1122 | if ((flag != 0) && (flag != 1)) { | |
1123 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1124 | return -1; | |
1125 | } | |
1126 | /* | |
1127 | * We expect this value to be non-zero when generic Block Layer | |
1128 | * Discard support is detected in iblock_create_virtdevice(). | |
1129 | */ | |
1130 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | |
1131 | printk(KERN_ERR "Generic Block Discard not supported\n"); | |
1132 | return -ENOSYS; | |
1133 | } | |
1134 | ||
1135 | DEV_ATTRIB(dev)->emulate_tpu = flag; | |
1136 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", | |
1137 | dev, flag); | |
1138 | return 0; | |
1139 | } | |
1140 | ||
1141 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | |
1142 | { | |
1143 | if ((flag != 0) && (flag != 1)) { | |
1144 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1145 | return -1; | |
1146 | } | |
1147 | /* | |
1148 | * We expect this value to be non-zero when generic Block Layer | |
1149 | * Discard support is detected in iblock_create_virtdevice(). | |
1150 | */ | |
1151 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | |
1152 | printk(KERN_ERR "Generic Block Discard not supported\n"); | |
1153 | return -ENOSYS; | |
1154 | } | |
1155 | ||
1156 | DEV_ATTRIB(dev)->emulate_tpws = flag; | |
1157 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", | |
1158 | dev, flag); | |
1159 | return 0; | |
1160 | } | |
1161 | ||
1162 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | |
1163 | { | |
1164 | if ((flag != 0) && (flag != 1)) { | |
1165 | printk(KERN_ERR "Illegal value %d\n", flag); | |
1166 | return -1; | |
1167 | } | |
1168 | DEV_ATTRIB(dev)->enforce_pr_isids = flag; | |
1169 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, | |
1170 | (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); | |
1171 | return 0; | |
1172 | } | |
1173 | ||
1174 | /* | |
1175 | * Note, this can only be called on unexported SE Device Object. | |
1176 | */ | |
1177 | int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | |
1178 | { | |
1179 | u32 orig_queue_depth = dev->queue_depth; | |
1180 | ||
1181 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | |
1182 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" | |
1183 | " dev_export_obj: %d count exists\n", dev, | |
1184 | atomic_read(&dev->dev_export_obj.obj_access_count)); | |
1185 | return -1; | |
1186 | } | |
1187 | if (!(queue_depth)) { | |
1188 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" | |
1189 | "_depth\n", dev); | |
1190 | return -1; | |
1191 | } | |
1192 | ||
1193 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | |
1194 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | |
1195 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" | |
1196 | " exceeds TCM/SE_Device TCQ: %u\n", | |
1197 | dev, queue_depth, | |
1198 | DEV_ATTRIB(dev)->hw_queue_depth); | |
1199 | return -1; | |
1200 | } | |
1201 | } else { | |
1202 | if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { | |
1203 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | |
1204 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" | |
1205 | " %u exceeds TCM/SE_Device MAX" | |
1206 | " TCQ: %u\n", dev, queue_depth, | |
1207 | DEV_ATTRIB(dev)->hw_queue_depth); | |
1208 | return -1; | |
1209 | } | |
1210 | } | |
1211 | } | |
1212 | ||
1213 | DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; | |
1214 | if (queue_depth > orig_queue_depth) | |
1215 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); | |
1216 | else if (queue_depth < orig_queue_depth) | |
1217 | atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); | |
1218 | ||
1219 | printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", | |
1220 | dev, queue_depth); | |
1221 | return 0; | |
1222 | } | |
1223 | ||
1224 | int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | |
1225 | { | |
1226 | int force = 0; /* Force setting for VDEVS */ | |
1227 | ||
1228 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | |
1229 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | |
1230 | " max_sectors while dev_export_obj: %d count exists\n", | |
1231 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | |
1232 | return -1; | |
1233 | } | |
1234 | if (!(max_sectors)) { | |
1235 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" | |
1236 | " max_sectors\n", dev); | |
1237 | return -1; | |
1238 | } | |
1239 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { | |
1240 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" | |
1241 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, | |
1242 | DA_STATUS_MAX_SECTORS_MIN); | |
1243 | return -1; | |
1244 | } | |
1245 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | |
1246 | if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { | |
1247 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | |
1248 | " greater than TCM/SE_Device max_sectors:" | |
1249 | " %u\n", dev, max_sectors, | |
1250 | DEV_ATTRIB(dev)->hw_max_sectors); | |
1251 | return -1; | |
1252 | } | |
1253 | } else { | |
1254 | if (!(force) && (max_sectors > | |
1255 | DEV_ATTRIB(dev)->hw_max_sectors)) { | |
1256 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | |
1257 | " greater than TCM/SE_Device max_sectors" | |
1258 | ": %u, use force=1 to override.\n", dev, | |
1259 | max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); | |
1260 | return -1; | |
1261 | } | |
1262 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | |
1263 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | |
1264 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | |
1265 | " %u\n", dev, max_sectors, | |
1266 | DA_STATUS_MAX_SECTORS_MAX); | |
1267 | return -1; | |
1268 | } | |
1269 | } | |
1270 | ||
1271 | DEV_ATTRIB(dev)->max_sectors = max_sectors; | |
1272 | printk("dev[%p]: SE Device max_sectors changed to %u\n", | |
1273 | dev, max_sectors); | |
1274 | return 0; | |
1275 | } | |
1276 | ||
1277 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | |
1278 | { | |
1279 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | |
1280 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | |
1281 | " optimal_sectors while dev_export_obj: %d count exists\n", | |
1282 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | |
1283 | return -EINVAL; | |
1284 | } | |
1285 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | |
1286 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" | |
1287 | " changed for TCM/pSCSI\n", dev); | |
1288 | return -EINVAL; | |
1289 | } | |
1290 | if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { | |
1291 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" | |
1292 | " greater than max_sectors: %u\n", dev, | |
1293 | optimal_sectors, DEV_ATTRIB(dev)->max_sectors); | |
1294 | return -EINVAL; | |
1295 | } | |
1296 | ||
1297 | DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; | |
1298 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", | |
1299 | dev, optimal_sectors); | |
1300 | return 0; | |
1301 | } | |
1302 | ||
1303 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) | |
1304 | { | |
1305 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | |
1306 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" | |
1307 | " while dev_export_obj: %d count exists\n", dev, | |
1308 | atomic_read(&dev->dev_export_obj.obj_access_count)); | |
1309 | return -1; | |
1310 | } | |
1311 | ||
1312 | if ((block_size != 512) && | |
1313 | (block_size != 1024) && | |
1314 | (block_size != 2048) && | |
1315 | (block_size != 4096)) { | |
1316 | printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" | |
1317 | " for SE device, must be 512, 1024, 2048 or 4096\n", | |
1318 | dev, block_size); | |
1319 | return -1; | |
1320 | } | |
1321 | ||
1322 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | |
1323 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" | |
1324 | " Physical Device, use for Linux/SCSI to change" | |
1325 | " block_size for underlying hardware\n", dev); | |
1326 | return -1; | |
1327 | } | |
1328 | ||
1329 | DEV_ATTRIB(dev)->block_size = block_size; | |
1330 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", | |
1331 | dev, block_size); | |
1332 | return 0; | |
1333 | } | |
1334 | ||
1335 | struct se_lun *core_dev_add_lun( | |
1336 | struct se_portal_group *tpg, | |
1337 | struct se_hba *hba, | |
1338 | struct se_device *dev, | |
1339 | u32 lun) | |
1340 | { | |
1341 | struct se_lun *lun_p; | |
1342 | u32 lun_access = 0; | |
1343 | ||
1344 | if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { | |
1345 | printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", | |
1346 | atomic_read(&dev->dev_access_obj.obj_access_count)); | |
1347 | return NULL; | |
1348 | } | |
1349 | ||
1350 | lun_p = core_tpg_pre_addlun(tpg, lun); | |
1351 | if ((IS_ERR(lun_p)) || !(lun_p)) | |
1352 | return NULL; | |
1353 | ||
1354 | if (dev->dev_flags & DF_READ_ONLY) | |
1355 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | |
1356 | else | |
1357 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | |
1358 | ||
1359 | if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) | |
1360 | return NULL; | |
1361 | ||
1362 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" | |
1363 | " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), | |
1364 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, | |
1365 | TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); | |
1366 | /* | |
1367 | * Update LUN maps for dynamically added initiators when | |
1368 | * generate_node_acl is enabled. | |
1369 | */ | |
1370 | if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { | |
1371 | struct se_node_acl *acl; | |
1372 | spin_lock_bh(&tpg->acl_node_lock); | |
1373 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | |
1374 | if (acl->dynamic_node_acl) { | |
1375 | spin_unlock_bh(&tpg->acl_node_lock); | |
1376 | core_tpg_add_node_to_devs(acl, tpg); | |
1377 | spin_lock_bh(&tpg->acl_node_lock); | |
1378 | } | |
1379 | } | |
1380 | spin_unlock_bh(&tpg->acl_node_lock); | |
1381 | } | |
1382 | ||
1383 | return lun_p; | |
1384 | } | |
1385 | ||
1386 | /* core_dev_del_lun(): | |
1387 | * | |
1388 | * | |
1389 | */ | |
1390 | int core_dev_del_lun( | |
1391 | struct se_portal_group *tpg, | |
1392 | u32 unpacked_lun) | |
1393 | { | |
1394 | struct se_lun *lun; | |
1395 | int ret = 0; | |
1396 | ||
1397 | lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); | |
1398 | if (!(lun)) | |
1399 | return ret; | |
1400 | ||
1401 | core_tpg_post_dellun(tpg, lun); | |
1402 | ||
1403 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" | |
1404 | " device object\n", TPG_TFO(tpg)->get_fabric_name(), | |
1405 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, | |
1406 | TPG_TFO(tpg)->get_fabric_name()); | |
1407 | ||
1408 | return 0; | |
1409 | } | |
1410 | ||
1411 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) | |
1412 | { | |
1413 | struct se_lun *lun; | |
1414 | ||
1415 | spin_lock(&tpg->tpg_lun_lock); | |
1416 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | |
1417 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" | |
1418 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", | |
1419 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | |
1420 | TRANSPORT_MAX_LUNS_PER_TPG-1, | |
1421 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | |
1422 | spin_unlock(&tpg->tpg_lun_lock); | |
1423 | return NULL; | |
1424 | } | |
1425 | lun = &tpg->tpg_lun_list[unpacked_lun]; | |
1426 | ||
1427 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | |
1428 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" | |
1429 | " Target Portal Group: %hu, ignoring request.\n", | |
1430 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | |
1431 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | |
1432 | spin_unlock(&tpg->tpg_lun_lock); | |
1433 | return NULL; | |
1434 | } | |
1435 | spin_unlock(&tpg->tpg_lun_lock); | |
1436 | ||
1437 | return lun; | |
1438 | } | |
1439 | ||
1440 | /* core_dev_get_lun(): | |
1441 | * | |
1442 | * | |
1443 | */ | |
1444 | static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) | |
1445 | { | |
1446 | struct se_lun *lun; | |
1447 | ||
1448 | spin_lock(&tpg->tpg_lun_lock); | |
1449 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | |
1450 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" | |
1451 | "_TPG-1: %u for Target Portal Group: %hu\n", | |
1452 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | |
1453 | TRANSPORT_MAX_LUNS_PER_TPG-1, | |
1454 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | |
1455 | spin_unlock(&tpg->tpg_lun_lock); | |
1456 | return NULL; | |
1457 | } | |
1458 | lun = &tpg->tpg_lun_list[unpacked_lun]; | |
1459 | ||
1460 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | |
1461 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | |
1462 | " Target Portal Group: %hu, ignoring request.\n", | |
1463 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | |
1464 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | |
1465 | spin_unlock(&tpg->tpg_lun_lock); | |
1466 | return NULL; | |
1467 | } | |
1468 | spin_unlock(&tpg->tpg_lun_lock); | |
1469 | ||
1470 | return lun; | |
1471 | } | |
1472 | ||
1473 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | |
1474 | struct se_portal_group *tpg, | |
1475 | u32 mapped_lun, | |
1476 | char *initiatorname, | |
1477 | int *ret) | |
1478 | { | |
1479 | struct se_lun_acl *lacl; | |
1480 | struct se_node_acl *nacl; | |
1481 | ||
1482 | if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { | |
1483 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", | |
1484 | TPG_TFO(tpg)->get_fabric_name()); | |
1485 | *ret = -EOVERFLOW; | |
1486 | return NULL; | |
1487 | } | |
1488 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); | |
1489 | if (!(nacl)) { | |
1490 | *ret = -EINVAL; | |
1491 | return NULL; | |
1492 | } | |
1493 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); | |
1494 | if (!(lacl)) { | |
1495 | printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); | |
1496 | *ret = -ENOMEM; | |
1497 | return NULL; | |
1498 | } | |
1499 | ||
1500 | INIT_LIST_HEAD(&lacl->lacl_list); | |
1501 | lacl->mapped_lun = mapped_lun; | |
1502 | lacl->se_lun_nacl = nacl; | |
1503 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | |
1504 | ||
1505 | return lacl; | |
1506 | } | |
1507 | ||
1508 | int core_dev_add_initiator_node_lun_acl( | |
1509 | struct se_portal_group *tpg, | |
1510 | struct se_lun_acl *lacl, | |
1511 | u32 unpacked_lun, | |
1512 | u32 lun_access) | |
1513 | { | |
1514 | struct se_lun *lun; | |
1515 | struct se_node_acl *nacl; | |
1516 | ||
1517 | lun = core_dev_get_lun(tpg, unpacked_lun); | |
1518 | if (!(lun)) { | |
1519 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | |
1520 | " Target Portal Group: %hu, ignoring request.\n", | |
1521 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | |
1522 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | |
1523 | return -EINVAL; | |
1524 | } | |
1525 | ||
1526 | nacl = lacl->se_lun_nacl; | |
1527 | if (!(nacl)) | |
1528 | return -EINVAL; | |
1529 | ||
1530 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && | |
1531 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) | |
1532 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | |
1533 | ||
1534 | lacl->se_lun = lun; | |
1535 | ||
1536 | if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, | |
1537 | lun_access, nacl, tpg, 1) < 0) | |
1538 | return -EINVAL; | |
1539 | ||
1540 | spin_lock(&lun->lun_acl_lock); | |
1541 | list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); | |
1542 | atomic_inc(&lun->lun_acl_count); | |
1543 | smp_mb__after_atomic_inc(); | |
1544 | spin_unlock(&lun->lun_acl_lock); | |
1545 | ||
1546 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " | |
1547 | " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), | |
1548 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, | |
1549 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", | |
1550 | lacl->initiatorname); | |
1551 | /* | |
1552 | * Check to see if there are any existing persistent reservation APTPL | |
1553 | * pre-registrations that need to be enabled for this LUN ACL.. | |
1554 | */ | |
1555 | core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); | |
1556 | return 0; | |
1557 | } | |
1558 | ||
1559 | /* core_dev_del_initiator_node_lun_acl(): | |
1560 | * | |
1561 | * | |
1562 | */ | |
1563 | int core_dev_del_initiator_node_lun_acl( | |
1564 | struct se_portal_group *tpg, | |
1565 | struct se_lun *lun, | |
1566 | struct se_lun_acl *lacl) | |
1567 | { | |
1568 | struct se_node_acl *nacl; | |
1569 | ||
1570 | nacl = lacl->se_lun_nacl; | |
1571 | if (!(nacl)) | |
1572 | return -EINVAL; | |
1573 | ||
1574 | spin_lock(&lun->lun_acl_lock); | |
1575 | list_del(&lacl->lacl_list); | |
1576 | atomic_dec(&lun->lun_acl_count); | |
1577 | smp_mb__after_atomic_dec(); | |
1578 | spin_unlock(&lun->lun_acl_lock); | |
1579 | ||
1580 | core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, | |
1581 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | |
1582 | ||
1583 | lacl->se_lun = NULL; | |
1584 | ||
1585 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" | |
1586 | " InitiatorNode: %s Mapped LUN: %u\n", | |
1587 | TPG_TFO(tpg)->get_fabric_name(), | |
1588 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | |
1589 | lacl->initiatorname, lacl->mapped_lun); | |
1590 | ||
1591 | return 0; | |
1592 | } | |
1593 | ||
1594 | void core_dev_free_initiator_node_lun_acl( | |
1595 | struct se_portal_group *tpg, | |
1596 | struct se_lun_acl *lacl) | |
1597 | { | |
1598 | printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" | |
1599 | " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), | |
1600 | TPG_TFO(tpg)->tpg_get_tag(tpg), | |
1601 | TPG_TFO(tpg)->get_fabric_name(), | |
1602 | lacl->initiatorname, lacl->mapped_lun); | |
1603 | ||
1604 | kfree(lacl); | |
1605 | } | |
1606 | ||
1607 | int core_dev_setup_virtual_lun0(void) | |
1608 | { | |
1609 | struct se_hba *hba; | |
1610 | struct se_device *dev; | |
1611 | struct se_subsystem_dev *se_dev = NULL; | |
1612 | struct se_subsystem_api *t; | |
1613 | char buf[16]; | |
1614 | int ret; | |
1615 | ||
1616 | hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); | |
1617 | if (IS_ERR(hba)) | |
1618 | return PTR_ERR(hba); | |
1619 | ||
1620 | se_global->g_lun0_hba = hba; | |
1621 | t = hba->transport; | |
1622 | ||
1623 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | |
1624 | if (!(se_dev)) { | |
1625 | printk(KERN_ERR "Unable to allocate memory for" | |
1626 | " struct se_subsystem_dev\n"); | |
1627 | ret = -ENOMEM; | |
1628 | goto out; | |
1629 | } | |
1630 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | |
1631 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | |
1632 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | |
1633 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | |
1634 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | |
1635 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | |
1636 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | |
1637 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | |
1638 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | |
1639 | spin_lock_init(&se_dev->se_dev_lock); | |
1640 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | |
1641 | se_dev->t10_wwn.t10_sub_dev = se_dev; | |
1642 | se_dev->t10_alua.t10_sub_dev = se_dev; | |
1643 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | |
1644 | se_dev->se_dev_hba = hba; | |
1645 | ||
1646 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); | |
1647 | if (!(se_dev->se_dev_su_ptr)) { | |
1648 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" | |
1649 | " from allocate_virtdevice()\n"); | |
1650 | ret = -ENOMEM; | |
1651 | goto out; | |
1652 | } | |
1653 | se_global->g_lun0_su_dev = se_dev; | |
1654 | ||
1655 | memset(buf, 0, 16); | |
1656 | sprintf(buf, "rd_pages=8"); | |
1657 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); | |
1658 | ||
1659 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | |
1660 | if (!(dev) || IS_ERR(dev)) { | |
1661 | ret = -ENOMEM; | |
1662 | goto out; | |
1663 | } | |
1664 | se_dev->se_dev_ptr = dev; | |
1665 | se_global->g_lun0_dev = dev; | |
1666 | ||
1667 | return 0; | |
1668 | out: | |
1669 | se_global->g_lun0_su_dev = NULL; | |
1670 | kfree(se_dev); | |
1671 | if (se_global->g_lun0_hba) { | |
1672 | core_delete_hba(se_global->g_lun0_hba); | |
1673 | se_global->g_lun0_hba = NULL; | |
1674 | } | |
1675 | return ret; | |
1676 | } | |
1677 | ||
1678 | ||
1679 | void core_dev_release_virtual_lun0(void) | |
1680 | { | |
1681 | struct se_hba *hba = se_global->g_lun0_hba; | |
1682 | struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; | |
1683 | ||
1684 | if (!(hba)) | |
1685 | return; | |
1686 | ||
1687 | if (se_global->g_lun0_dev) | |
1688 | se_free_virtual_device(se_global->g_lun0_dev, hba); | |
1689 | ||
1690 | kfree(su_dev); | |
1691 | core_delete_hba(hba); | |
1692 | } |