1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3 * This file contains main functions related to the iSCSI Target Core Driver.
5 * (c) Copyright 2007-2013 Datera, Inc.
9 ******************************************************************************/
11 #include <crypto/hash.h>
12 #include <linux/string.h>
13 #include <linux/kthread.h>
14 #include <linux/completion.h>
15 #include <linux/module.h>
16 #include <linux/vmalloc.h>
17 #include <linux/idr.h>
18 #include <linux/delay.h>
19 #include <linux/sched/signal.h>
20 #include <asm/unaligned.h>
21 #include <linux/inet.h>
23 #include <scsi/scsi_proto.h>
24 #include <scsi/iscsi_proto.h>
25 #include <scsi/scsi_tcq.h>
26 #include <target/target_core_base.h>
27 #include <target/target_core_fabric.h>
29 #include <target/target_core_backend.h>
30 #include <target/iscsi/iscsi_target_core.h>
31 #include "iscsi_target_parameters.h"
32 #include "iscsi_target_seq_pdu_list.h"
33 #include "iscsi_target_datain_values.h"
34 #include "iscsi_target_erl0.h"
35 #include "iscsi_target_erl1.h"
36 #include "iscsi_target_erl2.h"
37 #include "iscsi_target_login.h"
38 #include "iscsi_target_tmr.h"
39 #include "iscsi_target_tpg.h"
40 #include "iscsi_target_util.h"
41 #include "iscsi_target.h"
42 #include "iscsi_target_device.h"
43 #include <target/iscsi/iscsi_target_stat.h>
45 #include <target/iscsi/iscsi_transport.h>
47 static LIST_HEAD(g_tiqn_list);
48 static LIST_HEAD(g_np_list);
49 static DEFINE_SPINLOCK(tiqn_lock);
50 static DEFINE_MUTEX(np_lock);
52 static struct idr tiqn_idr;
54 struct mutex auth_id_lock;
56 struct iscsit_global *iscsit_global;
58 struct kmem_cache *lio_qr_cache;
59 struct kmem_cache *lio_dr_cache;
60 struct kmem_cache *lio_ooo_cache;
61 struct kmem_cache *lio_r2t_cache;
63 static int iscsit_handle_immediate_data(struct iscsit_cmd *,
64 struct iscsi_scsi_req *, u32);
66 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
68 struct iscsi_tiqn *tiqn = NULL;
70 spin_lock(&tiqn_lock);
71 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
72 if (!strcmp(tiqn->tiqn, buf)) {
74 spin_lock(&tiqn->tiqn_state_lock);
75 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
76 tiqn->tiqn_access_count++;
77 spin_unlock(&tiqn->tiqn_state_lock);
78 spin_unlock(&tiqn_lock);
81 spin_unlock(&tiqn->tiqn_state_lock);
84 spin_unlock(&tiqn_lock);
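/*
 * Flip an active tiqn into TIQN_STATE_SHUTDOWN under tiqn_state_lock so that
 * iscsit_get_tiqn_for_login() above stops handing out new references; the
 * caller (iscsit_del_tiqn() below) treats a negative return as "was not
 * active".
 */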
89 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
91 spin_lock(&tiqn->tiqn_state_lock);
92 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
93 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
94 spin_unlock(&tiqn->tiqn_state_lock);
97 spin_unlock(&tiqn->tiqn_state_lock);
102 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
104 spin_lock(&tiqn->tiqn_state_lock);
105 tiqn->tiqn_access_count--;
106 spin_unlock(&tiqn->tiqn_state_lock);
110 * Note that IQN formatting is expected to be done in userspace, and
111 * no explicit IQN format checks are done here.
113 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
115 struct iscsi_tiqn *tiqn = NULL;
118 if (strlen(buf) >= ISCSI_IQN_LEN) {
119 pr_err("Target IQN exceeds %d bytes\n",
121 return ERR_PTR(-EINVAL);
124 tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
126 return ERR_PTR(-ENOMEM);
128 sprintf(tiqn->tiqn, "%s", buf);
129 INIT_LIST_HEAD(&tiqn->tiqn_list);
130 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
131 spin_lock_init(&tiqn->tiqn_state_lock);
132 spin_lock_init(&tiqn->tiqn_tpg_lock);
133 spin_lock_init(&tiqn->sess_err_stats.lock);
134 spin_lock_init(&tiqn->login_stats.lock);
135 spin_lock_init(&tiqn->logout_stats.lock);
137 tiqn->tiqn_state = TIQN_STATE_ACTIVE;
139 idr_preload(GFP_KERNEL);
140 spin_lock(&tiqn_lock);
142 ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
144 pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
145 spin_unlock(&tiqn_lock);
150 tiqn->tiqn_index = ret;
151 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
153 spin_unlock(&tiqn_lock);
156 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
162 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
165 * Wait for accesses to said struct iscsi_tiqn to end.
167 spin_lock(&tiqn->tiqn_state_lock);
168 while (tiqn->tiqn_access_count != 0) {
169 spin_unlock(&tiqn->tiqn_state_lock);
171 spin_lock(&tiqn->tiqn_state_lock);
173 spin_unlock(&tiqn->tiqn_state_lock);
176 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
179 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
180 * while holding tiqn->tiqn_state_lock. This means that all subsequent
181 * attempts to access this struct iscsi_tiqn will fail from both transport
182 * fabric and control code paths.
184 if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
185 pr_err("iscsit_set_tiqn_shutdown() failed\n");
189 iscsit_wait_for_tiqn(tiqn);
191 spin_lock(&tiqn_lock);
192 list_del(&tiqn->tiqn_list);
193 idr_remove(&tiqn_idr, tiqn->tiqn_index);
194 spin_unlock(&tiqn_lock);
196 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
201 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
205 * Determine if the network portal is accepting storage traffic.
207 spin_lock_bh(&np->np_thread_lock);
208 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
209 spin_unlock_bh(&np->np_thread_lock);
212 spin_unlock_bh(&np->np_thread_lock);
214 * Determine if the portal group is accepting storage traffic.
216 spin_lock_bh(&tpg->tpg_state_lock);
217 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
218 spin_unlock_bh(&tpg->tpg_state_lock);
221 spin_unlock_bh(&tpg->tpg_state_lock);
224 * Here we serialize access across the TIQN+TPG Tuple.
226 ret = down_interruptible(&tpg->np_login_sem);
230 spin_lock_bh(&tpg->tpg_state_lock);
231 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
232 spin_unlock_bh(&tpg->tpg_state_lock);
233 up(&tpg->np_login_sem);
236 spin_unlock_bh(&tpg->tpg_state_lock);
241 void iscsit_login_kref_put(struct kref *kref)
243 struct iscsi_tpg_np *tpg_np = container_of(kref,
244 struct iscsi_tpg_np, tpg_np_kref);
246 complete(&tpg_np->tpg_np_comp);
249 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
250 struct iscsi_tpg_np *tpg_np)
252 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
254 up(&tpg->np_login_sem);
257 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
260 iscsit_put_tiqn_for_login(tiqn);
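/*
 * Compare a candidate sockaddr against an existing np: the address family,
 * the IPv4/IPv6 address, the port, and the network transport must all match
 * for the portals to be considered the same.
 */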
265 bool iscsit_check_np_match(
266 struct sockaddr_storage *sockaddr,
268 int network_transport)
270 struct sockaddr_in *sock_in, *sock_in_e;
271 struct sockaddr_in6 *sock_in6, *sock_in6_e;
272 bool ip_match = false;
275 if (sockaddr->ss_family == AF_INET6) {
276 sock_in6 = (struct sockaddr_in6 *)sockaddr;
277 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
279 if (!memcmp(&sock_in6->sin6_addr.in6_u,
280 &sock_in6_e->sin6_addr.in6_u,
281 sizeof(struct in6_addr)))
284 port = ntohs(sock_in6->sin6_port);
285 port_e = ntohs(sock_in6_e->sin6_port);
287 sock_in = (struct sockaddr_in *)sockaddr;
288 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
290 if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
293 port = ntohs(sock_in->sin_port);
294 port_e = ntohs(sock_in_e->sin_port);
297 if (ip_match && (port_e == port) &&
298 (np->np_network_transport == network_transport))
304 static struct iscsi_np *iscsit_get_np(
305 struct sockaddr_storage *sockaddr,
306 int network_transport)
311 lockdep_assert_held(&np_lock);
313 list_for_each_entry(np, &g_np_list, np_list) {
314 spin_lock_bh(&np->np_thread_lock);
315 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
316 spin_unlock_bh(&np->np_thread_lock);
320 match = iscsit_check_np_match(sockaddr, np, network_transport);
323 * Increment the np_exports reference count now to
324 * prevent iscsit_del_np() below from being called
325 * while iscsi_tpg_add_network_portal() is still running.
328 spin_unlock_bh(&np->np_thread_lock);
331 spin_unlock_bh(&np->np_thread_lock);
337 struct iscsi_np *iscsit_add_np(
338 struct sockaddr_storage *sockaddr,
339 int network_transport)
344 mutex_lock(&np_lock);
347 * Locate the existing struct iscsi_np if already active..
349 np = iscsit_get_np(sockaddr, network_transport);
351 mutex_unlock(&np_lock);
355 np = kzalloc(sizeof(*np), GFP_KERNEL);
357 mutex_unlock(&np_lock);
358 return ERR_PTR(-ENOMEM);
361 np->np_flags |= NPF_IP_NETWORK;
362 np->np_network_transport = network_transport;
363 spin_lock_init(&np->np_thread_lock);
364 init_completion(&np->np_restart_comp);
365 INIT_LIST_HEAD(&np->np_list);
367 ret = iscsi_target_setup_login_socket(np, sockaddr);
370 mutex_unlock(&np_lock);
374 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
375 if (IS_ERR(np->np_thread)) {
376 pr_err("Unable to create kthread: iscsi_np\n");
377 ret = PTR_ERR(np->np_thread);
379 mutex_unlock(&np_lock);
383 * Increment the np_exports reference count now to prevent
384 * iscsit_del_np() below from being run while a new call to
385 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
386 * active. We don't need to hold np->np_thread_lock at this
387 * point because iscsi_np has not been added to g_np_list yet.
390 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
392 list_add_tail(&np->np_list, &g_np_list);
393 mutex_unlock(&np_lock);
395 pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
396 &np->np_sockaddr, np->np_transport->name);
401 int iscsit_reset_np_thread(
403 struct iscsi_tpg_np *tpg_np,
404 struct iscsi_portal_group *tpg,
407 spin_lock_bh(&np->np_thread_lock);
408 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
409 spin_unlock_bh(&np->np_thread_lock);
412 np->np_thread_state = ISCSI_NP_THREAD_RESET;
413 atomic_inc(&np->np_reset_count);
416 spin_unlock_bh(&np->np_thread_lock);
417 send_sig(SIGINT, np->np_thread, 1);
418 wait_for_completion(&np->np_restart_comp);
419 spin_lock_bh(&np->np_thread_lock);
421 spin_unlock_bh(&np->np_thread_lock);
423 if (tpg_np && shutdown) {
424 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
426 wait_for_completion(&tpg_np->tpg_np_comp);
432 static void iscsit_free_np(struct iscsi_np *np)
435 sock_release(np->np_socket);
438 int iscsit_del_np(struct iscsi_np *np)
440 spin_lock_bh(&np->np_thread_lock);
442 if (np->np_exports) {
444 spin_unlock_bh(&np->np_thread_lock);
447 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
448 spin_unlock_bh(&np->np_thread_lock);
452 * We need to send the signal to wake up Linux/Net
453 * which may be sleeping in sock_accept()..
455 send_sig(SIGINT, np->np_thread, 1);
456 kthread_stop(np->np_thread);
457 np->np_thread = NULL;
460 np->np_transport->iscsit_free_np(np);
462 mutex_lock(&np_lock);
463 list_del(&np->np_list);
464 mutex_unlock(&np_lock);
466 pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
467 &np->np_sockaddr, np->np_transport->name);
469 iscsit_put_transport(np->np_transport);
474 static void iscsit_get_rx_pdu(struct iscsit_conn *);
476 int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
478 return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
480 EXPORT_SYMBOL(iscsit_queue_rsp);
482 void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
484 spin_lock_bh(&conn->cmd_lock);
485 if (!list_empty(&cmd->i_conn_node))
486 list_del_init(&cmd->i_conn_node);
487 spin_unlock_bh(&conn->cmd_lock);
489 __iscsit_free_cmd(cmd, true);
491 EXPORT_SYMBOL(iscsit_aborted_task);
493 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
494 u32, u32, const void *, void *);
495 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
498 iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
499 const void *data_buf, u32 data_buf_len)
501 struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
503 u32 niov = 0, tx_size = ISCSI_HDR_LEN;
506 iov = &cmd->iov_misc[0];
507 iov[niov].iov_base = cmd->pdu;
508 iov[niov++].iov_len = ISCSI_HDR_LEN;
510 if (conn->conn_ops->HeaderDigest) {
511 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
513 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
514 ISCSI_HDR_LEN, 0, NULL,
517 iov[0].iov_len += ISCSI_CRC_LEN;
518 tx_size += ISCSI_CRC_LEN;
519 pr_debug("Attaching CRC32C HeaderDigest"
520 " to opcode 0x%x 0x%08x\n",
521 hdr->opcode, *header_digest);
525 u32 padding = ((-data_buf_len) & 3);
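/*
 * The expression above rounds the data segment up to the next 4-byte
 * boundary: e.g. a 5-byte buffer needs 3 pad bytes, an 8-byte buffer
 * needs none.
 */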
527 iov[niov].iov_base = (void *)data_buf;
528 iov[niov++].iov_len = data_buf_len;
529 tx_size += data_buf_len;
532 iov[niov].iov_base = &cmd->pad_bytes;
533 iov[niov++].iov_len = padding;
535 pr_debug("Attaching %u additional"
536 " padding bytes.\n", padding);
539 if (conn->conn_ops->DataDigest) {
540 iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
541 data_buf, data_buf_len,
542 padding, &cmd->pad_bytes,
545 iov[niov].iov_base = &cmd->data_crc;
546 iov[niov++].iov_len = ISCSI_CRC_LEN;
547 tx_size += ISCSI_CRC_LEN;
548 pr_debug("Attached DataDigest for %u"
549 " bytes opcode 0x%x, CRC 0x%08x\n",
550 data_buf_len, hdr->opcode, cmd->data_crc);
554 cmd->iov_misc_count = niov;
555 cmd->tx_size = tx_size;
557 ret = iscsit_send_tx_data(cmd, conn, 1);
559 iscsit_tx_thread_wait_for_tcp(conn);
566 static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
567 u32 data_offset, u32 data_length);
568 static void iscsit_unmap_iovec(struct iscsit_cmd *);
569 static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
570 u32, u32, u32, u8 *);
572 iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
573 const struct iscsi_datain *datain)
576 u32 iov_count = 0, tx_size = 0;
579 iov = &cmd->iov_data[0];
580 iov[iov_count].iov_base = cmd->pdu;
581 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
582 tx_size += ISCSI_HDR_LEN;
584 if (conn->conn_ops->HeaderDigest) {
585 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
587 iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
588 ISCSI_HDR_LEN, 0, NULL,
591 iov[0].iov_len += ISCSI_CRC_LEN;
592 tx_size += ISCSI_CRC_LEN;
594 pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
598 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
599 cmd->orig_iov_data_count - (iov_count + 2),
600 datain->offset, datain->length);
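/*
 * The "- (iov_count + 2)" above keeps two kvec slots in reserve for the
 * optional pad-bytes and DataDigest entries that may be appended below.
 */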
604 iov_count += iov_ret;
605 tx_size += datain->length;
607 cmd->padding = ((-datain->length) & 3);
609 iov[iov_count].iov_base = cmd->pad_bytes;
610 iov[iov_count++].iov_len = cmd->padding;
611 tx_size += cmd->padding;
613 pr_debug("Attaching %u padding bytes\n", cmd->padding);
616 if (conn->conn_ops->DataDigest) {
617 cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
623 iov[iov_count].iov_base = &cmd->data_crc;
624 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
625 tx_size += ISCSI_CRC_LEN;
627 pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
628 datain->length + cmd->padding, cmd->data_crc);
631 cmd->iov_data_count = iov_count;
632 cmd->tx_size = tx_size;
634 ret = iscsit_fe_sendpage_sg(cmd, conn);
636 iscsit_unmap_iovec(cmd);
639 iscsit_tx_thread_wait_for_tcp(conn);
646 static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
647 struct iscsi_datain_req *dr, const void *buf,
651 return iscsit_xmit_datain_pdu(conn, cmd, buf);
653 return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
656 static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
658 return TARGET_PROT_NORMAL;
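/*
 * Callback table for the built-in iSCSI/TCP transport (ISCSI_TCP): the PDU
 * transmit/receive and queueing hooks point at the socket-based helpers in
 * this file.
 */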
661 static struct iscsit_transport iscsi_target_transport = {
663 .transport_type = ISCSI_TCP,
664 .rdma_shutdown = false,
666 .iscsit_setup_np = iscsit_setup_np,
667 .iscsit_accept_np = iscsit_accept_np,
668 .iscsit_free_np = iscsit_free_np,
669 .iscsit_get_login_rx = iscsit_get_login_rx,
670 .iscsit_put_login_tx = iscsit_put_login_tx,
671 .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
672 .iscsit_immediate_queue = iscsit_immediate_queue,
673 .iscsit_response_queue = iscsit_response_queue,
674 .iscsit_queue_data_in = iscsit_queue_rsp,
675 .iscsit_queue_status = iscsit_queue_rsp,
676 .iscsit_aborted_task = iscsit_aborted_task,
677 .iscsit_xmit_pdu = iscsit_xmit_pdu,
678 .iscsit_get_rx_pdu = iscsit_get_rx_pdu,
679 .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
682 static int __init iscsi_target_init_module(void)
686 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
687 iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
691 spin_lock_init(&iscsit_global->ts_bitmap_lock);
692 mutex_init(&auth_id_lock);
695 ret = target_register_template(&iscsi_ops);
699 size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
700 iscsit_global->ts_bitmap = vzalloc(size);
701 if (!iscsit_global->ts_bitmap)
704 if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
705 pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
708 cpumask_setall(iscsit_global->allowed_cpumask);
710 lio_qr_cache = kmem_cache_create("lio_qr_cache",
711 sizeof(struct iscsi_queue_req),
712 __alignof__(struct iscsi_queue_req), 0, NULL);
714 pr_err("Unable to kmem_cache_create() for"
719 lio_dr_cache = kmem_cache_create("lio_dr_cache",
720 sizeof(struct iscsi_datain_req),
721 __alignof__(struct iscsi_datain_req), 0, NULL);
723 pr_err("Unable to kmem_cache_create() for"
728 lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
729 sizeof(struct iscsi_ooo_cmdsn),
730 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
731 if (!lio_ooo_cache) {
732 pr_err("Unable to kmem_cache_create() for"
737 lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
738 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
740 if (!lio_r2t_cache) {
741 pr_err("Unable to kmem_cache_create() for"
746 iscsit_register_transport(&iscsi_target_transport);
748 if (iscsit_load_discovery_tpg() < 0)
753 iscsit_unregister_transport(&iscsi_target_transport);
754 kmem_cache_destroy(lio_r2t_cache);
756 kmem_cache_destroy(lio_ooo_cache);
758 kmem_cache_destroy(lio_dr_cache);
760 kmem_cache_destroy(lio_qr_cache);
762 free_cpumask_var(iscsit_global->allowed_cpumask);
764 vfree(iscsit_global->ts_bitmap);
766 /* XXX: this probably wants to be its own unwind step.. */
767 if (iscsit_global->discovery_tpg)
768 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
769 target_unregister_template(&iscsi_ops);
771 kfree(iscsit_global);
775 static void __exit iscsi_target_cleanup_module(void)
777 iscsit_release_discovery_tpg();
778 iscsit_unregister_transport(&iscsi_target_transport);
779 kmem_cache_destroy(lio_qr_cache);
780 kmem_cache_destroy(lio_dr_cache);
781 kmem_cache_destroy(lio_ooo_cache);
782 kmem_cache_destroy(lio_r2t_cache);
785 * Shut down discovery sessions and disable the discovery TPG
787 if (iscsit_global->discovery_tpg)
788 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
790 target_unregister_template(&iscsi_ops);
792 free_cpumask_var(iscsit_global->allowed_cpumask);
793 vfree(iscsit_global->ts_bitmap);
794 kfree(iscsit_global);
797 int iscsit_add_reject(
798 struct iscsit_conn *conn,
802 struct iscsit_cmd *cmd;
804 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
808 cmd->iscsi_opcode = ISCSI_OP_REJECT;
809 cmd->reject_reason = reason;
811 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
813 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
814 iscsit_free_cmd(cmd, false);
818 spin_lock_bh(&conn->cmd_lock);
819 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
820 spin_unlock_bh(&conn->cmd_lock);
822 cmd->i_state = ISTATE_SEND_REJECT;
823 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
827 EXPORT_SYMBOL(iscsit_add_reject);
829 static int iscsit_add_reject_from_cmd(
830 struct iscsit_cmd *cmd,
835 struct iscsit_conn *conn;
836 const bool do_put = cmd->se_cmd.se_tfo != NULL;
839 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
845 cmd->iscsi_opcode = ISCSI_OP_REJECT;
846 cmd->reject_reason = reason;
848 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
850 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
851 iscsit_free_cmd(cmd, false);
856 spin_lock_bh(&conn->cmd_lock);
857 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
858 spin_unlock_bh(&conn->cmd_lock);
861 cmd->i_state = ISTATE_SEND_REJECT;
862 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
864 * Perform the kref_put now if se_cmd has already been set up by
865 * iscsit_setup_scsi_cmd()
868 pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
869 target_put_sess_cmd(&cmd->se_cmd);
874 static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
877 return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
880 int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
882 return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
884 EXPORT_SYMBOL(iscsit_reject_cmd);
887 * Map some portion of the allocated scatterlist to an iovec, suitable for
888 * kernel sockets to copy data in/out.
890 static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
891 u32 data_offset, u32 data_length)
893 u32 i = 0, orig_data_length = data_length;
894 struct scatterlist *sg;
895 unsigned int page_off;
898 * We know each entry in t_data_sg contains a page.
900 u32 ent = data_offset / PAGE_SIZE;
905 if (ent >= cmd->se_cmd.t_data_nents) {
906 pr_err("Initial page entry out-of-bounds\n");
910 sg = &cmd->se_cmd.t_data_sg[ent];
911 page_off = (data_offset % PAGE_SIZE);
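/*
 * Example (assuming 4 KiB pages and the one-page-per-entry layout noted
 * above): data_offset 5120 lands in sg entry 1 at page_off 1024.
 */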
913 cmd->first_data_sg = sg;
914 cmd->first_data_sg_off = page_off;
916 while (data_length) {
919 if (WARN_ON_ONCE(!sg || i >= nvec))
922 cur_len = min_t(u32, data_length, sg->length - page_off);
924 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
925 iov[i].iov_len = cur_len;
927 data_length -= cur_len;
933 cmd->kmapped_nents = i;
938 pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
939 data_offset, orig_data_length, i, nvec);
940 for_each_sg(cmd->se_cmd.t_data_sg, sg,
941 cmd->se_cmd.t_data_nents, i) {
942 pr_err("[%d] off %d len %d\n",
943 i, sg->offset, sg->length);
948 static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
951 struct scatterlist *sg;
953 sg = cmd->first_data_sg;
955 for (i = 0; i < cmd->kmapped_nents; i++)
956 kunmap(sg_page(&sg[i]));
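/*
 * Walk the connection's command list and retire any command already in
 * ISTATE_SENT_STATUS whose StatSN the initiator has acknowledged via
 * ExpStatSN (serial-number arithmetic through iscsi_sna_lt()).
 */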
959 static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
962 struct iscsit_cmd *cmd, *cmd_p;
964 conn->exp_statsn = exp_statsn;
966 if (conn->sess->sess_ops->RDMAExtensions)
969 spin_lock_bh(&conn->cmd_lock);
970 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
971 spin_lock(&cmd->istate_lock);
972 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
973 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
974 cmd->i_state = ISTATE_REMOVE;
975 spin_unlock(&cmd->istate_lock);
976 list_move_tail(&cmd->i_conn_node, &ack_list);
979 spin_unlock(&cmd->istate_lock);
981 spin_unlock_bh(&conn->cmd_lock);
983 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
984 list_del_init(&cmd->i_conn_node);
985 iscsit_free_cmd(cmd, false);
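/*
 * Size the per-command kvec array: one entry per page of the expected data
 * transfer (at least one), plus ISCSI_IOV_DATA_BUFFER extra slots, which
 * presumably cover the header, padding and digest entries used elsewhere in
 * this file.
 */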
989 static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
991 u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
993 iov_count += ISCSI_IOV_DATA_BUFFER;
994 cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
998 cmd->orig_iov_data_count = iov_count;
1002 int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1005 int data_direction, payload_length;
1006 struct iscsi_ecdb_ahdr *ecdb_ahdr;
1007 struct iscsi_scsi_req *hdr;
1008 int iscsi_task_attr;
1012 atomic_long_inc(&conn->sess->cmd_pdus);
1014 hdr = (struct iscsi_scsi_req *) buf;
1015 payload_length = ntoh24(hdr->dlength);
1017 /* FIXME: Add checks for AdditionalHeaderSegment */
1019 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
1020 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1021 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
1022 " not set. Bad iSCSI Initiator.\n");
1023 return iscsit_add_reject_cmd(cmd,
1024 ISCSI_REASON_BOOKMARK_INVALID, buf);
1027 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
1028 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
1030 * From RFC-3720 Section 10.3.1:
1032 * "Either or both of R and W MAY be 1 when either the
1033 * Expected Data Transfer Length and/or Bidirectional Read
1034 * Expected Data Transfer Length are 0"
1036 * For this case, go ahead and clear the unnecessary bits
1037 * to avoid any confusion with ->data_direction.
1039 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
1040 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1042 pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1043 " set when Expected Data Transfer Length is 0 for"
1044 " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
1047 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
1048 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
1049 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
1050 " MUST be set if Expected Data Transfer Length is not 0."
1051 " Bad iSCSI Initiator\n");
1052 return iscsit_add_reject_cmd(cmd,
1053 ISCSI_REASON_BOOKMARK_INVALID, buf);
1056 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
1057 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
1058 pr_err("Bidirectional operations not supported!\n");
1059 return iscsit_add_reject_cmd(cmd,
1060 ISCSI_REASON_BOOKMARK_INVALID, buf);
1063 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1064 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
1065 " Scsi Command PDU.\n");
1066 return iscsit_add_reject_cmd(cmd,
1067 ISCSI_REASON_BOOKMARK_INVALID, buf);
1070 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
1071 pr_err("ImmediateData=No but DataSegmentLength=%u,"
1072 " protocol error.\n", payload_length);
1073 return iscsit_add_reject_cmd(cmd,
1074 ISCSI_REASON_PROTOCOL_ERROR, buf);
1077 if ((be32_to_cpu(hdr->data_length) == payload_length) &&
1078 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
1079 pr_err("Expected Data Transfer Length and Length of"
1080 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
1081 " bit is not set protocol error\n");
1082 return iscsit_add_reject_cmd(cmd,
1083 ISCSI_REASON_PROTOCOL_ERROR, buf);
1086 if (payload_length > be32_to_cpu(hdr->data_length)) {
1087 pr_err("DataSegmentLength: %u is greater than"
1088 " EDTL: %u, protocol error.\n", payload_length,
1090 return iscsit_add_reject_cmd(cmd,
1091 ISCSI_REASON_PROTOCOL_ERROR, buf);
1094 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1095 pr_err("DataSegmentLength: %u is greater than"
1096 " MaxXmitDataSegmentLength: %u, protocol error.\n",
1097 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1098 return iscsit_add_reject_cmd(cmd,
1099 ISCSI_REASON_PROTOCOL_ERROR, buf);
1102 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
1103 pr_err("DataSegmentLength: %u is greater than"
1104 " FirstBurstLength: %u, protocol error.\n",
1105 payload_length, conn->sess->sess_ops->FirstBurstLength);
1106 return iscsit_add_reject_cmd(cmd,
1107 ISCSI_REASON_BOOKMARK_INVALID, buf);
1113 ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
1114 if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
1115 pr_err("Additional Header Segment type %d not supported!\n",
1116 ecdb_ahdr->ahstype);
1117 return iscsit_add_reject_cmd(cmd,
1118 ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
1121 cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
1124 return iscsit_add_reject_cmd(cmd,
1125 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1126 memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
1127 memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
1128 be16_to_cpu(ecdb_ahdr->ahslength) - 1);
1131 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
1132 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
1135 cmd->data_direction = data_direction;
1136 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
1138 * Figure out the SAM Task Attribute for the incoming SCSI CDB
1140 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
1141 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
1142 sam_task_attr = TCM_SIMPLE_TAG;
1143 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
1144 sam_task_attr = TCM_ORDERED_TAG;
1145 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
1146 sam_task_attr = TCM_HEAD_TAG;
1147 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
1148 sam_task_attr = TCM_ACA_TAG;
1150 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
1151 " TCM_SIMPLE_TAG\n", iscsi_task_attr);
1152 sam_task_attr = TCM_SIMPLE_TAG;
1155 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
1156 cmd->i_state = ISTATE_NEW_CMD;
1157 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1158 cmd->immediate_data = (payload_length) ? 1 : 0;
1159 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
1160 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
1161 if (cmd->unsolicited_data)
1162 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
1164 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1165 if (hdr->flags & ISCSI_FLAG_CMD_READ)
1166 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
1168 cmd->targ_xfer_tag = 0xFFFFFFFF;
1169 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1170 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1171 cmd->first_burst_len = payload_length;
1173 if (!conn->sess->sess_ops->RDMAExtensions &&
1174 cmd->data_direction == DMA_FROM_DEVICE) {
1175 struct iscsi_datain_req *dr;
1177 dr = iscsit_allocate_datain_req();
1179 if (cdb != hdr->cdb)
1181 return iscsit_add_reject_cmd(cmd,
1182 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1185 iscsit_attach_datain_req(cmd, dr);
1189 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
1191 __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
1192 conn->sess->se_sess, be32_to_cpu(hdr->data_length),
1193 cmd->data_direction, sam_task_attr,
1194 cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
1197 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
1198 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
1199 hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
1202 target_get_sess_cmd(&cmd->se_cmd, true);
1204 cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
1205 cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
1208 if (cdb != hdr->cdb)
1211 if (cmd->sense_reason) {
1212 if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
1213 return iscsit_add_reject_cmd(cmd,
1214 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1220 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
1221 if (cmd->sense_reason)
1224 cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
1225 if (cmd->sense_reason)
1228 if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
1229 return iscsit_add_reject_cmd(cmd,
1230 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1234 spin_lock_bh(&conn->cmd_lock);
1235 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1236 spin_unlock_bh(&conn->cmd_lock);
1239 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1241 void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
1243 iscsit_set_dataout_sequence_values(cmd);
1245 spin_lock_bh(&cmd->dataout_timeout_lock);
1246 iscsit_start_dataout_timer(cmd, cmd->conn);
1247 spin_unlock_bh(&cmd->dataout_timeout_lock);
1249 EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
1251 int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1252 struct iscsi_scsi_req *hdr)
1256 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1257 * the Immediate Bit is not set, and no Immediate
1260 * A PDU/CmdSN carrying Immediate Data can only
1261 * be processed after the DataCRC has passed.
1262 * If the DataCRC fails, the CmdSN MUST NOT
1263 * be acknowledged. (See below)
1265 if (!cmd->immediate_data) {
1266 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1267 (unsigned char *)hdr, hdr->cmdsn);
1268 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1270 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1271 target_put_sess_cmd(&cmd->se_cmd);
1276 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1279 * If no Immediate Data is attached, it's OK to return now.
1281 if (!cmd->immediate_data) {
1282 if (!cmd->sense_reason && cmd->unsolicited_data)
1283 iscsit_set_unsolicited_dataout(cmd);
1284 if (!cmd->sense_reason)
1287 target_put_sess_cmd(&cmd->se_cmd);
1292 * Early CHECK_CONDITIONs with ImmediateData never make it to command
1293 * execution. These exceptions are processed in CmdSN order using
1294 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
1296 if (cmd->sense_reason)
1299 * Call directly into transport_generic_new_cmd() to perform
1300 * the backend memory allocation.
1302 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
1303 if (cmd->sense_reason)
1308 EXPORT_SYMBOL(iscsit_process_scsi_cmd);
1311 iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
1314 int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1318 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
1321 u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
1322 cmd->first_burst_len);
1324 pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
1325 cmd->se_cmd.data_length, cmd->write_data_done,
1326 cmd->first_burst_len, length);
1327 rc = iscsit_dump_data_payload(cmd->conn, length, 1);
1328 pr_debug("Finished dumping immediate data\n");
1330 immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
1332 immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1333 cmd->first_burst_len);
1336 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1338 * A PDU/CmdSN carrying Immediate Data passed
1339 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1340 * Immediate Bit is not set.
1342 cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
1343 (unsigned char *)hdr, hdr->cmdsn);
1344 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1347 if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1348 target_put_sess_cmd(&cmd->se_cmd);
1351 } else if (cmd->unsolicited_data)
1352 iscsit_set_unsolicited_dataout(cmd);
1354 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1356 * Immediate Data failed DataCRC and ERL>=1,
1357 * silently drop this PDU and let the initiator
1358 * plug the CmdSN gap.
1360 * FIXME: Send Unsolicited NOPIN with reserved
1361 * TTT here to help the initiator figure out
1362 * the missing CmdSN, although they should be
1363 * intelligent enough to determine the missing
1364 * CmdSN and issue a retry to plug the sequence.
1366 cmd->i_state = ISTATE_REMOVE;
1367 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
1368 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1375 iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1378 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1380 bool dump_payload = false;
1382 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1386 * Allocate iovecs needed for struct socket operations for
1387 * traditional iSCSI block I/O.
1389 if (iscsit_allocate_iovecs(cmd) < 0) {
1390 return iscsit_reject_cmd(cmd,
1391 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1393 immed_data = cmd->immediate_data;
1395 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1399 dump_payload = true;
1404 return iscsit_get_immediate_data(cmd, hdr, dump_payload);
1407 static u32 iscsit_do_crypto_hash_sg(
1408 struct ahash_request *hash,
1409 struct iscsit_cmd *cmd,
1416 struct scatterlist *sg;
1417 unsigned int page_off;
1419 crypto_ahash_init(hash);
1421 sg = cmd->first_data_sg;
1422 page_off = cmd->first_data_sg_off;
1424 if (data_length && page_off) {
1425 struct scatterlist first_sg;
1426 u32 len = min_t(u32, data_length, sg->length - page_off);
1428 sg_init_table(&first_sg, 1);
1429 sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
1431 ahash_request_set_crypt(hash, &first_sg, NULL, len);
1432 crypto_ahash_update(hash);
1438 while (data_length) {
1439 u32 cur_len = min_t(u32, data_length, sg->length);
1441 ahash_request_set_crypt(hash, sg, NULL, cur_len);
1442 crypto_ahash_update(hash);
1444 data_length -= cur_len;
1445 /* iscsit_map_iovec has already checked for invalid sg pointers */
1450 struct scatterlist pad_sg;
1452 sg_init_one(&pad_sg, pad_bytes, padding);
1453 ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
1455 crypto_ahash_finup(hash);
1457 ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
1458 crypto_ahash_final(hash);
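/*
 * Single-shot variant of the digest helper above: hash a contiguous buffer
 * plus any pad bytes in one crypto_ahash_digest() call and write the CRC32C
 * result into *data_crc.
 */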
1464 static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
1465 const void *buf, u32 payload_length, u32 padding,
1466 const void *pad_bytes, void *data_crc)
1468 struct scatterlist sg[2];
1470 sg_init_table(sg, ARRAY_SIZE(sg));
1471 sg_set_buf(sg, buf, payload_length);
1473 sg_set_buf(sg + 1, pad_bytes, padding);
1475 ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
1477 crypto_ahash_digest(hash);
1481 __iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1482 struct iscsit_cmd *cmd, u32 payload_length,
1485 struct iscsi_data *hdr = buf;
1486 struct se_cmd *se_cmd;
1490 atomic_long_add(payload_length, &conn->sess->rx_data_octets);
1492 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1493 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1494 hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
1495 payload_length, conn->cid);
1497 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1498 pr_err("Command ITT: 0x%08x received DataOUT after"
1499 " last DataOUT received, dumping payload\n",
1500 cmd->init_task_tag);
1501 return iscsit_dump_data_payload(conn, payload_length, 1);
1504 if (cmd->data_direction != DMA_TO_DEVICE) {
1505 pr_err("Command ITT: 0x%08x received DataOUT for a"
1506 " NON-WRITE command.\n", cmd->init_task_tag);
1507 return iscsit_dump_data_payload(conn, payload_length, 1);
1509 se_cmd = &cmd->se_cmd;
1510 iscsit_mod_dataout_timer(cmd);
1512 if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1513 pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
1514 be32_to_cpu(hdr->offset), payload_length,
1515 cmd->se_cmd.data_length);
1516 return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
1519 if (cmd->unsolicited_data) {
1520 int dump_unsolicited_data = 0;
1522 if (conn->sess->sess_ops->InitialR2T) {
1523 pr_err("Received unexpected unsolicited data"
1524 " while InitialR2T=Yes, protocol error.\n");
1525 transport_send_check_condition_and_sense(&cmd->se_cmd,
1526 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
1530 * Special case for dealing with Unsolicited DataOUT
1531 * and Unsupported SAM WRITE Opcodes and SE resource allocation
1535 /* Something's amiss if we're not in WRITE_PENDING state... */
1536 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1537 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
1538 dump_unsolicited_data = 1;
1540 if (dump_unsolicited_data) {
1542 * Check if a delayed TASK_ABORTED status needs to
1543 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1544 * received with the unsolicited data out.
1546 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1547 iscsit_stop_dataout_timer(cmd);
1549 return iscsit_dump_data_payload(conn, payload_length, 1);
1553 * For the normal solicited data path:
1555 * Check for a delayed TASK_ABORTED status and dump any
1556 * incoming data out payload if one exists. Also, when the
1557 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1558 * data out sequence, we decrement outstanding_r2ts. Once
1559 * outstanding_r2ts reaches zero, go ahead and send the delayed
1560 * TASK_ABORTED status.
1562 if (se_cmd->transport_state & CMD_T_ABORTED) {
1563 if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
1564 --cmd->outstanding_r2ts < 1)
1565 iscsit_stop_dataout_timer(cmd);
1567 return iscsit_dump_data_payload(conn, payload_length, 1);
1571 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1572 * within-command recovery checks before receiving the payload.
1574 rc = iscsit_check_pre_dataout(cmd, buf);
1575 if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
1577 else if (rc == DATAOUT_CANNOT_RECOVER)
1582 EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
1585 iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1586 struct iscsit_cmd **out_cmd)
1588 struct iscsi_data *hdr = buf;
1589 struct iscsit_cmd *cmd;
1590 u32 payload_length = ntoh24(hdr->dlength);
1592 bool success = false;
1594 if (!payload_length) {
1595 pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
1599 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1600 pr_err_ratelimited("DataSegmentLength: %u is greater than"
1601 " MaxXmitDataSegmentLength: %u\n", payload_length,
1602 conn->conn_ops->MaxXmitDataSegmentLength);
1603 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
1606 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
1610 rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
1617 EXPORT_SYMBOL(iscsit_check_dataout_hdr);
1620 iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1621 struct iscsi_data *hdr)
1624 u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
1626 int iov_ret, data_crc_failed = 0;
1628 payload_length = min_t(u32, cmd->se_cmd.data_length,
1629 ntoh24(hdr->dlength));
1630 rx_size += payload_length;
1631 iov = &cmd->iov_data[0];
1633 iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
1634 be32_to_cpu(hdr->offset), payload_length);
1638 iov_count += iov_ret;
1640 padding = ((-payload_length) & 3);
1642 iov[iov_count].iov_base = cmd->pad_bytes;
1643 iov[iov_count++].iov_len = padding;
1645 pr_debug("Receiving %u padding bytes.\n", padding);
1648 if (conn->conn_ops->DataDigest) {
1649 iov[iov_count].iov_base = &checksum;
1650 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1651 rx_size += ISCSI_CRC_LEN;
1654 WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
1655 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1657 iscsit_unmap_iovec(cmd);
1659 if (rx_got != rx_size)
1662 if (conn->conn_ops->DataDigest) {
1665 data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
1666 be32_to_cpu(hdr->offset),
1667 payload_length, padding,
1670 if (checksum != data_crc) {
1671 pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1672 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1673 " does not match computed 0x%08x\n",
1674 hdr->itt, hdr->offset, payload_length,
1675 hdr->datasn, checksum, data_crc);
1676 data_crc_failed = 1;
1678 pr_debug("Got CRC32C DataDigest 0x%08x for"
1679 " %u bytes of Data Out\n", checksum,
1684 return data_crc_failed;
1688 iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
1689 bool data_crc_failed)
1691 struct iscsit_conn *conn = cmd->conn;
1694 * Increment post receive data and CRC values or perform
1695 * within-command recovery.
1697 rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
1698 if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
1700 else if (rc == DATAOUT_SEND_R2T) {
1701 iscsit_set_dataout_sequence_values(cmd);
1702 conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
1703 } else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
1705 * Handle extra special case for out of order
1706 * Unsolicited Data Out.
1708 spin_lock_bh(&cmd->istate_lock);
1709 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1710 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1711 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1712 spin_unlock_bh(&cmd->istate_lock);
1714 iscsit_stop_dataout_timer(cmd);
1717 target_execute_cmd(&cmd->se_cmd);
1719 } else /* DATAOUT_CANNOT_RECOVER */
1724 EXPORT_SYMBOL(iscsit_check_dataout_payload);
1726 static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
1728 struct iscsit_cmd *cmd = NULL;
1729 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1731 bool data_crc_failed = false;
1733 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1739 rc = iscsit_get_dataout(conn, cmd, hdr);
1743 data_crc_failed = true;
1745 return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
1748 int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1749 struct iscsi_nopout *hdr)
1751 u32 payload_length = ntoh24(hdr->dlength);
1753 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1754 pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
1756 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1757 (unsigned char *)hdr);
1759 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1760 (unsigned char *)hdr);
1763 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1764 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1765 " not set, protocol error.\n");
1767 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1768 (unsigned char *)hdr);
1770 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1771 (unsigned char *)hdr);
1774 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1775 pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1776 " greater than MaxXmitDataSegmentLength: %u, protocol"
1777 " error.\n", payload_length,
1778 conn->conn_ops->MaxXmitDataSegmentLength);
1780 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1781 (unsigned char *)hdr);
1783 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1784 (unsigned char *)hdr);
1787 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
1788 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1789 hdr->itt == RESERVED_ITT ? "Response" : "Request",
1790 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1793 * This is not a response to an Unsolicited NopIN, which means
1794 * it can either be a NOPOUT ping request (with a valid ITT),
1795 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1796 * Either way, make sure we allocate a struct iscsit_cmd, as both
1797 * can contain ping data.
1799 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1800 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
1801 cmd->i_state = ISTATE_SEND_NOPIN;
1802 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1804 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1805 cmd->targ_xfer_tag = 0xFFFFFFFF;
1806 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
1807 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
1808 cmd->data_direction = DMA_NONE;
1813 EXPORT_SYMBOL(iscsit_setup_nop_out);
1815 int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1816 struct iscsi_nopout *hdr)
1818 struct iscsit_cmd *cmd_p = NULL;
1821 * Initiator is expecting a NopIN ping reply..
1823 if (hdr->itt != RESERVED_ITT) {
1825 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1826 (unsigned char *)hdr);
1828 spin_lock_bh(&conn->cmd_lock);
1829 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1830 spin_unlock_bh(&conn->cmd_lock);
1832 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1834 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1835 iscsit_add_cmd_to_response_queue(cmd, conn,
1840 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1841 (unsigned char *)hdr, hdr->cmdsn);
1842 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1844 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1850 * This was a response to an unsolicited NOPIN ping.
1852 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1853 cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1857 iscsit_stop_nopin_response_timer(conn);
1859 cmd_p->i_state = ISTATE_REMOVE;
1860 iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
1862 iscsit_start_nopin_timer(conn);
1866 * Otherwise, the initiator is not expecting a NOPIN in response.
1867 * Just ignore for now.
1871 iscsit_free_cmd(cmd, false);
1875 EXPORT_SYMBOL(iscsit_process_nop_out);
1877 static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1880 unsigned char *ping_data = NULL;
1881 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1882 struct kvec *iov = NULL;
1883 u32 payload_length = ntoh24(hdr->dlength);
1886 ret = iscsit_setup_nop_out(conn, cmd, hdr);
1890 * Handle NOP-OUT payload for traditional iSCSI sockets
1892 if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1893 u32 checksum, data_crc, padding = 0;
1894 int niov = 0, rx_got, rx_size = payload_length;
1896 ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1902 iov = &cmd->iov_misc[0];
1903 iov[niov].iov_base = ping_data;
1904 iov[niov++].iov_len = payload_length;
1906 padding = ((-payload_length) & 3);
1908 pr_debug("Receiving %u additional bytes"
1909 " for padding.\n", padding);
1910 iov[niov].iov_base = &cmd->pad_bytes;
1911 iov[niov++].iov_len = padding;
1914 if (conn->conn_ops->DataDigest) {
1915 iov[niov].iov_base = &checksum;
1916 iov[niov++].iov_len = ISCSI_CRC_LEN;
1917 rx_size += ISCSI_CRC_LEN;
1920 WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
1921 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1922 if (rx_got != rx_size) {
1927 if (conn->conn_ops->DataDigest) {
1928 iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
1929 payload_length, padding,
1930 cmd->pad_bytes, &data_crc);
1932 if (checksum != data_crc) {
1933 pr_err("Ping data CRC32C DataDigest"
1934 " 0x%08x does not match computed 0x%08x\n",
1935 checksum, data_crc);
1936 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1937 pr_err("Unable to recover from"
1938 " NOPOUT Ping DataCRC failure while in"
1944 * Silently drop this PDU and let the
1945 * initiator plug the CmdSN gap.
1947 pr_debug("Dropping NOPOUT"
1948 " Command CmdSN: 0x%08x due to"
1949 " DataCRC error.\n", hdr->cmdsn);
1954 pr_debug("Got CRC32C DataDigest"
1955 " 0x%08x for %u bytes of ping data.\n",
1956 checksum, payload_length);
1960 ping_data[payload_length] = '\0';
1962 * Attach ping data to struct iscsit_cmd->buf_ptr.
1964 cmd->buf_ptr = ping_data;
1965 cmd->buf_ptr_size = payload_length;
1967 pr_debug("Got %u bytes of NOPOUT ping"
1968 " data.\n", payload_length);
1969 pr_debug("Ping Data: \"%s\"\n", ping_data);
1972 return iscsit_process_nop_out(conn, cmd, hdr);
1975 iscsit_free_cmd(cmd, false);
1981 static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
1983 switch (iscsi_tmf) {
1984 case ISCSI_TM_FUNC_ABORT_TASK:
1985 return TMR_ABORT_TASK;
1986 case ISCSI_TM_FUNC_ABORT_TASK_SET:
1987 return TMR_ABORT_TASK_SET;
1988 case ISCSI_TM_FUNC_CLEAR_ACA:
1989 return TMR_CLEAR_ACA;
1990 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1991 return TMR_CLEAR_TASK_SET;
1992 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1993 return TMR_LUN_RESET;
1994 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
1995 return TMR_TARGET_WARM_RESET;
1996 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
1997 return TMR_TARGET_COLD_RESET;
2004 iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2007 struct se_tmr_req *se_tmr;
2008 struct iscsi_tmr_req *tmr_req;
2009 struct iscsi_tm *hdr;
2010 int out_of_order_cmdsn = 0, ret;
2011 u8 function, tcm_function = TMR_UNKNOWN;
2013 hdr = (struct iscsi_tm *) buf;
2014 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2015 function = hdr->flags;
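/*
 * The Final bit shares the flags byte with the TMF function code, so it is
 * masked off above before "function" is interpreted.
 */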
2017 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
2018 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
2019 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
2020 hdr->rtt, hdr->refcmdsn, conn->cid);
2022 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2023 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2024 hdr->rtt != RESERVED_ITT)) {
2025 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
2026 hdr->rtt = RESERVED_ITT;
2029 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
2030 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2031 pr_err("Task Management Request TASK_REASSIGN not"
2032 " issued as immediate command, bad iSCSI Initiator"
2033 "implementation\n");
2034 return iscsit_add_reject_cmd(cmd,
2035 ISCSI_REASON_PROTOCOL_ERROR, buf);
2037 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2038 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
2039 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
2041 cmd->data_direction = DMA_NONE;
2042 cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
2043 if (!cmd->tmr_req) {
2044 return iscsit_add_reject_cmd(cmd,
2045 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2049 __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
2050 conn->sess->se_sess, 0, DMA_NONE,
2051 TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
2052 scsilun_to_int(&hdr->lun),
2055 target_get_sess_cmd(&cmd->se_cmd, true);
2058 * TASK_REASSIGN for ERL=2 / connection stays inside of
2059 * LIO-Target $FABRIC_MOD
2061 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2062 tcm_function = iscsit_convert_tmf(function);
2063 if (tcm_function == TMR_UNKNOWN) {
2064 pr_err("Unknown iSCSI TMR Function:"
2065 " 0x%02x\n", function);
2066 return iscsit_add_reject_cmd(cmd,
2067 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2070 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
2073 return iscsit_add_reject_cmd(cmd,
2074 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2076 cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
2078 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
2079 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
2080 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2081 cmd->init_task_tag = hdr->itt;
2082 cmd->targ_xfer_tag = 0xFFFFFFFF;
2083 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2084 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2085 se_tmr = cmd->se_cmd.se_tmr_req;
2086 tmr_req = cmd->tmr_req;
2088 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
2090 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2091 ret = transport_lookup_tmr_lun(&cmd->se_cmd);
2093 se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
2099 case ISCSI_TM_FUNC_ABORT_TASK:
2100 se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
2101 if (se_tmr->response)
2104 case ISCSI_TM_FUNC_ABORT_TASK_SET:
2105 case ISCSI_TM_FUNC_CLEAR_ACA:
2106 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
2107 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
2109 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2110 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
2111 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2115 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2116 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
2117 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2121 case ISCSI_TM_FUNC_TASK_REASSIGN:
2122 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
2124 * Perform sanity checks on the ExpDataSN only if the
2125 * TASK_REASSIGN was successful.
2127 if (se_tmr->response)
2130 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
2131 return iscsit_add_reject_cmd(cmd,
2132 ISCSI_REASON_BOOKMARK_INVALID, buf);
2135 pr_err("Unknown TMR function: 0x%02x, protocol"
2136 " error.\n", function);
2137 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
2141 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2142 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
2143 se_tmr->call_transport = 1;
2145 spin_lock_bh(&conn->cmd_lock);
2146 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2147 spin_unlock_bh(&conn->cmd_lock);
2149 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2150 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2151 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
2152 out_of_order_cmdsn = 1;
2153 } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2154 target_put_sess_cmd(&cmd->se_cmd);
2156 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2160 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2162 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2165 * Found the referenced task, send to transport for processing.
2167 if (se_tmr->call_transport)
2168 return transport_generic_handle_tmr(&cmd->se_cmd);
2171 * Could not find the referenced LUN, task, or Task Management
2172 * command not authorized or supported. Change state and
2173 * let the tx_thread send the response.
2175 * For connection recovery, this is also the default action for
2176 * TMR TASK_REASSIGN.
2178 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2179 target_put_sess_cmd(&cmd->se_cmd);
2182 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
2184 /* #warning FIXME: Support Text Command parameters besides SendTargets */
2186 iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2187 struct iscsi_text *hdr)
2189 u32 payload_length = ntoh24(hdr->dlength);
2191 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
2192 pr_err("Unable to accept text parameter length: %u"
2193 "greater than MaxXmitDataSegmentLength %u.\n",
2194 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
2195 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2196 (unsigned char *)hdr);
2199 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
2200 (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
2201 pr_err("Multi sequence text commands currently not supported\n");
2202 return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
2203 (unsigned char *)hdr);
2206 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
2207 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
2208 hdr->exp_statsn, payload_length);
2210 cmd->iscsi_opcode = ISCSI_OP_TEXT;
2211 cmd->i_state = ISTATE_SEND_TEXTRSP;
2212 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2213 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2214 cmd->targ_xfer_tag = 0xFFFFFFFF;
2215 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2216 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2217 cmd->data_direction = DMA_NONE;
2218 kfree(cmd->text_in_ptr);
2219 cmd->text_in_ptr = NULL;
2223 EXPORT_SYMBOL(iscsit_setup_text_cmd);
2226 iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2227 struct iscsi_text *hdr)
2229 unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
2233 cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
2234 if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
2235 pr_err("Unable to locate text_in buffer for sendtargets"
2239 goto empty_sendtargets;
2241 if (strncmp("SendTargets=", text_in, 12) != 0) {
2242 pr_err("Received Text Data that is not"
2243 " SendTargets, cannot continue.\n");
2246 /* '=' confirmed in strncmp */
2247 text_ptr = strchr(text_in, '=');
2249 if (!strncmp("=All", text_ptr, 5)) {
2250 cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
2251 } else if (!strncmp("=iqn.", text_ptr, 5) ||
2252 !strncmp("=eui.", text_ptr, 5)) {
2253 cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
2255 pr_err("Unable to locate valid SendTargets%s value\n",
2260 spin_lock_bh(&conn->cmd_lock);
2261 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2262 spin_unlock_bh(&conn->cmd_lock);
2265 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2267 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2268 cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
2269 (unsigned char *)hdr, hdr->cmdsn);
2270 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2276 return iscsit_execute_cmd(cmd, 0);
2279 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2280 (unsigned char *)hdr);
2282 EXPORT_SYMBOL(iscsit_process_text_cmd);
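/*
 * Entry point used by the traditional iscsi/tcp transport for a Text
 * Request: receive the padded key=value payload plus the optional
 * DataDigest from the socket, verify the CRC32C when DataDigest is
 * enabled, and hand the command off to iscsit_process_text_cmd().
 */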
2285 iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2288 struct iscsi_text *hdr = (struct iscsi_text *)buf;
2289 char *text_in = NULL;
2290 u32 payload_length = ntoh24(hdr->dlength);
2293 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
2297 rx_size = payload_length;
2298 if (payload_length) {
2299 u32 checksum = 0, data_crc = 0;
2301 int niov = 0, rx_got;
2304 rx_size = ALIGN(payload_length, 4);
2305 text_in = kzalloc(rx_size, GFP_KERNEL);
2309 cmd->text_in_ptr = text_in;
2311 memset(iov, 0, sizeof(iov));
2312 iov[niov].iov_base = text_in;
2313 iov[niov++].iov_len = rx_size;
2315 padding = rx_size - payload_length;
2317 pr_debug("Receiving %u additional bytes"
2318 " for padding.\n", padding);
2319 if (conn->conn_ops->DataDigest) {
2320 iov[niov].iov_base = &checksum;
2321 iov[niov++].iov_len = ISCSI_CRC_LEN;
2322 rx_size += ISCSI_CRC_LEN;
2325 WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
2326 rx_got = rx_data(conn, &iov[0], niov, rx_size);
2327 if (rx_got != rx_size)
2330 if (conn->conn_ops->DataDigest) {
2331 iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
2332 text_in, rx_size, 0, NULL,
2335 if (checksum != data_crc) {
2336 pr_err("Text data CRC32C DataDigest"
2337 " 0x%08x does not match computed"
2338 " 0x%08x\n", checksum, data_crc);
2339 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2340 pr_err("Unable to recover from"
2341 " Text Data digest failure while in"
2346 * Silently drop this PDU and let the
2347 * initiator plug the CmdSN gap.
2349 pr_debug("Dropping Text"
2350 " Command CmdSN: 0x%08x due to"
2351 " DataCRC error.\n", hdr->cmdsn);
2356 pr_debug("Got CRC32C DataDigest"
2357 " 0x%08x for %u bytes of text data.\n",
2358 checksum, payload_length);
2361 text_in[payload_length - 1] = '\0';
2362 pr_debug("Successfully read %d bytes of text"
2363 " data.\n", payload_length);
2366 return iscsit_process_text_cmd(conn, cmd, hdr);
2369 kfree(cmd->text_in_ptr);
2370 cmd->text_in_ptr = NULL;
2371 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
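/*
 * Logout with reason CLOSESESSION: flag the session and connection for
 * logout, take extra usage references, move every logged-in connection
 * of the session into TARG_CONN_STATE_IN_LOGOUT, and queue the Logout
 * Response for the tx_thread.
 */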
2374 int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2376 struct iscsit_conn *conn_p;
2377 struct iscsit_session *sess = conn->sess;
2379 pr_debug("Received logout request CLOSESESSION on CID: %hu"
2380 " for SID: %u.\n", conn->cid, conn->sess->sid);
2382 atomic_set(&sess->session_logout, 1);
2383 atomic_set(&conn->conn_logout_remove, 1);
2384 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2386 iscsit_inc_conn_usage_count(conn);
2387 iscsit_inc_session_usage_count(sess);
2389 spin_lock_bh(&sess->conn_lock);
2390 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2391 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2394 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2395 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2397 spin_unlock_bh(&sess->conn_lock);
2399 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2404 int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2406 struct iscsit_conn *l_conn;
2407 struct iscsit_session *sess = conn->sess;
2409 pr_debug("Received logout request CLOSECONNECTION for CID:"
2410 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2413 * A Logout Request with a CLOSECONNECTION reason code for a CID
2414 * can arrive on a connection with a differing CID.
2416 if (conn->cid == cmd->logout_cid) {
2417 spin_lock_bh(&conn->state_lock);
2418 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2419 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2421 atomic_set(&conn->conn_logout_remove, 1);
2422 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2423 iscsit_inc_conn_usage_count(conn);
2425 spin_unlock_bh(&conn->state_lock);
2428 * Handle all differing-CID CLOSECONNECTION requests in
2429 * iscsit_logout_post_handler_diffcid() so as to give enough
2430 * time for any non-immediate command's CmdSN to be
2431 * acknowledged on the connection in question.
2433 * Here we simply make sure the CID is still around.
2435 l_conn = iscsit_get_conn_from_cid(sess,
2438 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2439 iscsit_add_cmd_to_response_queue(cmd, conn,
2444 iscsit_dec_conn_usage_count(l_conn);
2447 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2452 int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2454 struct iscsit_session *sess = conn->sess;
2456 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2457 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2459 if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2460 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2461 " while ERL!=2.\n");
2462 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2463 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2467 if (conn->cid == cmd->logout_cid) {
2468 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2469 " with CID: %hu on CID: %hu, implementation error.\n",
2470 cmd->logout_cid, conn->cid);
2471 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2472 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2476 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
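/*
 * Common Logout Request entry point: account the logout in the tiqn
 * statistics, initialize the command for a Logout Response, and decide,
 * based on the reason code and CID, whether the RX thread must sleep
 * until the response has actually been transmitted.
 */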
2482 iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2485 int cmdsn_ret, logout_remove = 0;
2487 struct iscsi_logout *hdr;
2488 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2490 hdr = (struct iscsi_logout *) buf;
2491 reason_code = (hdr->flags & 0x7f);
2494 spin_lock(&tiqn->logout_stats.lock);
2495 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2496 tiqn->logout_stats.normal_logouts++;
2498 tiqn->logout_stats.abnormal_logouts++;
2499 spin_unlock(&tiqn->logout_stats.lock);
2502 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2503 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2504 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2505 hdr->cid, conn->cid);
2507 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2508 pr_err("Received logout request on connection that"
2509 " is not in logged in state, ignoring request.\n");
2510 iscsit_free_cmd(cmd, false);
2514 cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
2515 cmd->i_state = ISTATE_SEND_LOGOUTRSP;
2516 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2517 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2518 cmd->targ_xfer_tag = 0xFFFFFFFF;
2519 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2520 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2521 cmd->logout_cid = be16_to_cpu(hdr->cid);
2522 cmd->logout_reason = reason_code;
2523 cmd->data_direction = DMA_NONE;
2526 * We need to sleep in these cases (by returning 1) until the Logout
2527 * Response gets sent in the tx thread.
2529 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2530 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2531 be16_to_cpu(hdr->cid) == conn->cid))
2534 spin_lock_bh(&conn->cmd_lock);
2535 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2536 spin_unlock_bh(&conn->cmd_lock);
2538 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2539 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2542 * Immediate commands are executed immediately.
2543 * Non-immediate Logout commands are executed in CmdSN order.
2545 if (cmd->immediate_cmd) {
2546 int ret = iscsit_execute_cmd(cmd, 0);
2551 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2552 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
2554 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2558 return logout_remove;
2560 EXPORT_SYMBOL(iscsit_handle_logout_cmd);
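/*
 * SNACK requests are only valid with ErrorRecoveryLevel > 0; otherwise
 * they are rejected.  Data/R2T, Status and DataACK SNACKs are dispatched
 * to their respective recovery helpers, while R-Data SNACK is not
 * implemented.
 */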
2562 int iscsit_handle_snack(
2563 struct iscsit_conn *conn,
2566 struct iscsi_snack *hdr;
2568 hdr = (struct iscsi_snack *) buf;
2569 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2571 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2572 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2573 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2574 hdr->begrun, hdr->runlength, conn->cid);
2576 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2577 pr_err("Initiator sent SNACK request while in"
2578 " ErrorRecoveryLevel=0.\n");
2579 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2583 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
2584 * call from inside iscsi_send_recovery_datain_or_r2t().
2586 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2588 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2590 be32_to_cpu(hdr->ttt),
2591 be32_to_cpu(hdr->begrun),
2592 be32_to_cpu(hdr->runlength));
2593 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2594 return iscsit_handle_status_snack(conn, hdr->itt,
2595 be32_to_cpu(hdr->ttt),
2596 be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2597 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2598 return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2599 be32_to_cpu(hdr->begrun),
2600 be32_to_cpu(hdr->runlength));
2601 case ISCSI_FLAG_SNACK_TYPE_RDATA:
2602 /* FIXME: Support R-Data SNACK */
2603 pr_err("R-Data SNACK Not Supported.\n");
2604 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2607 pr_err("Unknown SNACK type 0x%02x, protocol"
2608 " error.\n", hdr->flags & 0x0f);
2609 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2615 EXPORT_SYMBOL(iscsit_handle_snack);
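/*
 * If the TCP socket has been shut down in either direction, give the
 * peer up to ISCSI_RX_THREAD_TCP_TIMEOUT seconds to complete before the
 * RX thread gives up on the connection.
 */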
2617 static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn)
2619 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2620 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2621 wait_for_completion_interruptible_timeout(
2622 &conn->rx_half_close_comp,
2623 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
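/*
 * Receive the ImmediateData segment that followed a SCSI Command PDU:
 * map the command's data buffer into iovecs, spill any bytes beyond the
 * CDB-derived buffer size into cmd->overflow_buf, read the pad bytes and
 * optional DataDigest, verify the CRC32C, and account the received bytes
 * in cmd->write_data_done.
 */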
2627 static int iscsit_handle_immediate_data(
2628 struct iscsit_cmd *cmd,
2629 struct iscsi_scsi_req *hdr,
2632 int iov_ret, rx_got = 0, rx_size = 0;
2633 u32 checksum, iov_count = 0, padding = 0;
2634 struct iscsit_conn *conn = cmd->conn;
2636 void *overflow_buf = NULL;
2638 BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
2639 rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
2640 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
2641 cmd->orig_iov_data_count - 2,
2642 cmd->write_data_done, rx_size);
2644 return IMMEDIATE_DATA_CANNOT_RECOVER;
2646 iov_count = iov_ret;
2647 iov = &cmd->iov_data[0];
2648 if (rx_size < length) {
2650 * Special case: length of immediate data exceeds the data
2651 * buffer size derived from the CDB.
2653 overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
2654 if (!overflow_buf) {
2655 iscsit_unmap_iovec(cmd);
2656 return IMMEDIATE_DATA_CANNOT_RECOVER;
2658 cmd->overflow_buf = overflow_buf;
2659 iov[iov_count].iov_base = overflow_buf;
2660 iov[iov_count].iov_len = length - rx_size;
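/*
 * iSCSI data segments are padded to a 4-byte boundary, so e.g. a 13-byte
 * payload is followed by 3 pad bytes sourced from cmd->pad_bytes.
 */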
2665 padding = ((-length) & 3);
2667 iov[iov_count].iov_base = cmd->pad_bytes;
2668 iov[iov_count++].iov_len = padding;
2672 if (conn->conn_ops->DataDigest) {
2673 iov[iov_count].iov_base = &checksum;
2674 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
2675 rx_size += ISCSI_CRC_LEN;
2678 WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
2679 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
2681 iscsit_unmap_iovec(cmd);
2683 if (rx_got != rx_size) {
2684 iscsit_rx_thread_wait_for_tcp(conn);
2685 return IMMEDIATE_DATA_CANNOT_RECOVER;
2688 if (conn->conn_ops->DataDigest) {
2691 data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
2692 cmd->write_data_done, length, padding,
2695 if (checksum != data_crc) {
2696 pr_err("ImmediateData CRC32C DataDigest 0x%08x"
2697 " does not match computed 0x%08x\n", checksum,
2700 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2701 pr_err("Unable to recover from"
2702 " Immediate Data digest failure while"
2704 iscsit_reject_cmd(cmd,
2705 ISCSI_REASON_DATA_DIGEST_ERROR,
2706 (unsigned char *)hdr);
2707 return IMMEDIATE_DATA_CANNOT_RECOVER;
2709 iscsit_reject_cmd(cmd,
2710 ISCSI_REASON_DATA_DIGEST_ERROR,
2711 (unsigned char *)hdr);
2712 return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2715 pr_debug("Got CRC32C DataDigest 0x%08x for"
2716 " %u bytes of Immediate Data\n", checksum,
2721 cmd->write_data_done += length;
2723 if (cmd->write_data_done == cmd->se_cmd.data_length) {
2724 spin_lock_bh(&cmd->istate_lock);
2725 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
2726 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
2727 spin_unlock_bh(&cmd->istate_lock);
2730 return IMMEDIATE_DATA_NORMAL_OPERATION;
2733 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections
2734 with active network interface */
2735 static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn)
2737 struct iscsit_cmd *cmd;
2738 struct iscsit_conn *conn_p;
2741 lockdep_assert_held(&conn->sess->conn_lock);
2744 * Only send an Asynchronous Message on connections whose network
2745 * interface is still functional.
2747 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2748 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2749 iscsit_inc_conn_usage_count(conn_p);
2758 cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
2760 iscsit_dec_conn_usage_count(conn_p);
2764 cmd->logout_cid = conn->cid;
2765 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2766 cmd->i_state = ISTATE_SEND_ASYNCMSG;
2768 spin_lock_bh(&conn_p->cmd_lock);
2769 list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
2770 spin_unlock_bh(&conn_p->cmd_lock);
2772 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2773 iscsit_dec_conn_usage_count(conn_p);
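/*
 * Build and transmit the DROPPING_CONNECTION Async Message for a failed
 * connection, advertising DefaultTime2Wait and DefaultTime2Retain so the
 * initiator knows when connection recovery may be attempted.
 */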
2776 static int iscsit_send_conn_drop_async_message(
2777 struct iscsit_cmd *cmd,
2778 struct iscsit_conn *conn)
2780 struct iscsi_async *hdr;
2782 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2784 hdr = (struct iscsi_async *) cmd->pdu;
2785 hdr->opcode = ISCSI_OP_ASYNC_EVENT;
2786 hdr->flags = ISCSI_FLAG_CMD_FINAL;
2787 cmd->init_task_tag = RESERVED_ITT;
2788 cmd->targ_xfer_tag = 0xFFFFFFFF;
2789 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2790 cmd->stat_sn = conn->stat_sn++;
2791 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2792 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2793 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2794 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2795 hdr->param1 = cpu_to_be16(cmd->logout_cid);
2796 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2797 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2799 pr_debug("Sending Connection Dropped Async Message StatSN:"
2800 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2801 cmd->logout_cid, conn->cid);
2803 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2806 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn)
2808 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2809 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2810 wait_for_completion_interruptible_timeout(
2811 &conn->tx_half_close_comp,
2812 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
2817 iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2818 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2821 hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
2822 hdr->flags = datain->flags;
2823 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2824 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2825 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2826 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2827 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2828 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2829 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2832 hton24(hdr->dlength, datain->length);
2833 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2834 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2835 (struct scsi_lun *)&hdr->lun);
2837 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2839 hdr->itt = cmd->init_task_tag;
2841 if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2842 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
2844 hdr->ttt = cpu_to_be32(0xFFFFFFFF);
2846 hdr->statsn = cpu_to_be32(cmd->stat_sn);
2848 hdr->statsn = cpu_to_be32(0xFFFFFFFF);
2850 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
2851 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2852 hdr->datasn = cpu_to_be32(datain->data_sn);
2853 hdr->offset = cpu_to_be32(datain->offset);
2855 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2856 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2857 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2858 ntohl(hdr->offset), datain->length, conn->cid);
2860 EXPORT_SYMBOL(iscsit_build_datain_pdu);
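/*
 * Fetch the next DataIN chunk from the datain request machinery, sanity
 * check that it stays within se_cmd.data_length, build the Data-In PDU
 * (suppressing piggybacked status while sense data is pending), and hand
 * it to the transport for transmission.
 */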
2862 static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2864 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
2865 struct iscsi_datain datain;
2866 struct iscsi_datain_req *dr;
2868 bool set_statsn = false;
2870 memset(&datain, 0, sizeof(struct iscsi_datain));
2871 dr = iscsit_get_datain_values(cmd, &datain);
2873 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
2874 cmd->init_task_tag);
2878 * Be paranoid and double check the logic for now.
2880 if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
2881 pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2882 " datain.length: %u exceeds cmd->data_length: %u\n",
2883 cmd->init_task_tag, datain.offset, datain.length,
2884 cmd->se_cmd.data_length);
2888 atomic_long_add(datain.length, &conn->sess->tx_data_octets);
2890 * Special case for successful execution w/ both DATAIN
2893 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
2894 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2895 datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
2897 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
2898 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2899 iscsit_increment_maxcmdsn(cmd, conn->sess);
2900 cmd->stat_sn = conn->stat_sn++;
2902 } else if (dr->dr_complete ==
2903 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2907 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
2909 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
2913 if (dr->dr_complete) {
2914 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2916 iscsit_free_datain_req(cmd, dr);
2923 iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2924 struct iscsi_logout_rsp *hdr)
2926 struct iscsit_conn *logout_conn = NULL;
2927 struct iscsi_conn_recovery *cr = NULL;
2928 struct iscsit_session *sess = conn->sess;
2930 * The actual shutting down of Sessions and/or Connections
2931 * for CLOSESESSION and CLOSECONNECTION Logout Requests
2932 * is done in iscsit_logout_post_handler().
2934 switch (cmd->logout_reason) {
2935 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
2936 pr_debug("iSCSI session logout successful, setting"
2937 " logout response to ISCSI_LOGOUT_SUCCESS.\n");
2938 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2940 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
2941 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
2944 * For CLOSECONNECTION logout requests carrying
2945 * a matching logout CID -> local CID, the reference
2946 * for the local CID will have been incremented in
2947 * iscsit_logout_closeconnection().
2949 * For CLOSECONNECTION logout requests carrying
2950 * a different CID than the connection it arrived
2951 * on, the connection responding to cmd->logout_cid
2952 * is stopped in iscsit_logout_post_handler_diffcid().
2955 pr_debug("iSCSI CID: %hu logout on CID: %hu"
2956 " successful.\n", cmd->logout_cid, conn->cid);
2957 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2959 case ISCSI_LOGOUT_REASON_RECOVERY:
2960 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
2961 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
2964 * If the connection is still active from our point of view,
2965 * force connection recovery to occur.
2967 logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
2970 iscsit_connection_reinstatement_rcfr(logout_conn);
2971 iscsit_dec_conn_usage_count(logout_conn);
2974 cr = iscsit_get_inactive_connection_recovery_entry(
2975 conn->sess, cmd->logout_cid);
2977 pr_err("Unable to locate CID: %hu for"
2978 " REMOVECONNFORRECOVERY Logout Request.\n",
2980 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2984 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
2986 pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
2987 " for recovery for CID: %hu on CID: %hu successful.\n",
2988 cmd->logout_cid, conn->cid);
2989 cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2992 pr_err("Unknown cmd->logout_reason: 0x%02x\n",
2993 cmd->logout_reason);
2997 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
2998 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
2999 hdr->response = cmd->logout_response;
3000 hdr->itt = cmd->init_task_tag;
3001 cmd->stat_sn = conn->stat_sn++;
3002 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3004 iscsit_increment_maxcmdsn(cmd, conn->sess);
3005 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3006 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3008 pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
3009 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
3010 cmd->init_task_tag, cmd->stat_sn, hdr->response,
3011 cmd->logout_cid, conn->cid);
3015 EXPORT_SYMBOL(iscsit_build_logout_rsp);
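/*
 * Build the Logout Response header via iscsit_build_logout_rsp() and, if
 * that succeeds, transmit it through the connection's transport.
 */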
3018 iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3022 rc = iscsit_build_logout_rsp(cmd, conn,
3023 (struct iscsi_logout_rsp *)&cmd->pdu[0]);
3027 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3031 iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3032 struct iscsi_nopin *hdr, bool nopout_response)
3034 hdr->opcode = ISCSI_OP_NOOP_IN;
3035 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3036 hton24(hdr->dlength, cmd->buf_ptr_size);
3037 if (nopout_response)
3038 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
3039 hdr->itt = cmd->init_task_tag;
3040 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3041 cmd->stat_sn = (nopout_response) ? conn->stat_sn++ :
3043 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3045 if (nopout_response)
3046 iscsit_increment_maxcmdsn(cmd, conn->sess);
3048 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3049 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3051 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
3052 " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
3053 "Solicited" : "Unsolicited", cmd->init_task_tag,
3054 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
3056 EXPORT_SYMBOL(iscsit_build_nopin_rsp);
3059 * Unsolicited NOPIN, either requesting a response or not.
3061 static int iscsit_send_unsolicited_nopin(
3062 struct iscsit_cmd *cmd,
3063 struct iscsit_conn *conn,
3066 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3069 iscsit_build_nopin_rsp(cmd, conn, hdr, false);
3071 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
3072 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
3074 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3078 spin_lock_bh(&cmd->istate_lock);
3079 cmd->i_state = want_response ?
3080 ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
3081 spin_unlock_bh(&cmd->istate_lock);
3087 iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3089 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3091 iscsit_build_nopin_rsp(cmd, conn, hdr, true);
3094 * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr.
3095 * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size.
3097 pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
3099 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
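/*
 * Pop the next R2T from the command's R2T list, fill in the R2T PDU
 * (letting the transport choose the TTT when it implements
 * iscsit_get_r2t_ttt()), transmit it, and arm the DataOUT timer.
 */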
3104 static int iscsit_send_r2t(
3105 struct iscsit_cmd *cmd,
3106 struct iscsit_conn *conn)
3108 struct iscsi_r2t *r2t;
3109 struct iscsi_r2t_rsp *hdr;
3112 r2t = iscsit_get_r2t_from_list(cmd);
3116 hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
3117 memset(hdr, 0, ISCSI_HDR_LEN);
3118 hdr->opcode = ISCSI_OP_R2T;
3119 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3120 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3121 (struct scsi_lun *)&hdr->lun);
3122 hdr->itt = cmd->init_task_tag;
3123 if (conn->conn_transport->iscsit_get_r2t_ttt)
3124 conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
3126 r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
3127 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
3128 hdr->statsn = cpu_to_be32(conn->stat_sn);
3129 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3130 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3131 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
3132 hdr->data_offset = cpu_to_be32(r2t->offset);
3133 hdr->data_length = cpu_to_be32(r2t->xfer_len);
3135 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
3136 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
3137 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
3138 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
3139 r2t->offset, r2t->xfer_len, conn->cid);
3141 spin_lock_bh(&cmd->r2t_lock);
3143 spin_unlock_bh(&cmd->r2t_lock);
3145 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3150 spin_lock_bh(&cmd->dataout_timeout_lock);
3151 iscsit_start_dataout_timer(cmd, conn);
3152 spin_unlock_bh(&cmd->dataout_timeout_lock);
3158 * @recovery: If called from iscsi_task_reassign_complete_write() for
3159 * connection recovery.
3161 int iscsit_build_r2ts_for_cmd(
3162 struct iscsit_conn *conn,
3163 struct iscsit_cmd *cmd,
3167 u32 offset = 0, xfer_len = 0;
3169 spin_lock_bh(&cmd->r2t_lock);
3170 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
3171 spin_unlock_bh(&cmd->r2t_lock);
3175 if (conn->sess->sess_ops->DataSequenceInOrder &&
3177 cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
3179 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
3180 if (conn->sess->sess_ops->DataSequenceInOrder) {
3181 offset = cmd->r2t_offset;
3183 if (first_r2t && recovery) {
3184 int new_data_end = offset +
3185 conn->sess->sess_ops->MaxBurstLength -
3186 cmd->next_burst_len;
3188 if (new_data_end > cmd->se_cmd.data_length)
3189 xfer_len = cmd->se_cmd.data_length - offset;
3192 conn->sess->sess_ops->MaxBurstLength -
3193 cmd->next_burst_len;
3195 int new_data_end = offset +
3196 conn->sess->sess_ops->MaxBurstLength;
3198 if (new_data_end > cmd->se_cmd.data_length)
3199 xfer_len = cmd->se_cmd.data_length - offset;
3201 xfer_len = conn->sess->sess_ops->MaxBurstLength;
3204 if ((s32)xfer_len < 0) {
3205 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3209 cmd->r2t_offset += xfer_len;
3211 if (cmd->r2t_offset == cmd->se_cmd.data_length)
3212 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3214 struct iscsi_seq *seq;
3216 seq = iscsit_get_seq_holder_for_r2t(cmd);
3218 spin_unlock_bh(&cmd->r2t_lock);
3222 offset = seq->offset;
3223 xfer_len = seq->xfer_len;
3225 if (cmd->seq_send_order == cmd->seq_count)
3226 cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3228 cmd->outstanding_r2ts++;
3231 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
3232 spin_unlock_bh(&cmd->r2t_lock);
3236 if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
3239 spin_unlock_bh(&cmd->r2t_lock);
3243 EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
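/*
 * Build the SCSI Response PDU, translating residual over/underflow from
 * the se_cmd flags and advancing StatSN only when requested by the
 * caller.
 */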
3245 void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3246 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
3249 cmd->stat_sn = conn->stat_sn++;
3251 atomic_long_inc(&conn->sess->rsp_pdus);
3253 memset(hdr, 0, ISCSI_HDR_LEN);
3254 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
3255 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3256 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3257 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3258 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3259 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3260 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3261 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3263 hdr->response = cmd->iscsi_response;
3264 hdr->cmd_status = cmd->se_cmd.scsi_status;
3265 hdr->itt = cmd->init_task_tag;
3266 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3268 iscsit_increment_maxcmdsn(cmd, conn->sess);
3269 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3270 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3272 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3273 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3274 cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status,
3275 cmd->se_cmd.scsi_status, conn->cid);
3277 EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3279 static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3281 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
3282 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
3283 void *data_buf = NULL;
3284 u32 padding = 0, data_buf_len = 0;
3286 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
3289 * Attach SENSE DATA payload to iSCSI Response PDU
3291 if (cmd->se_cmd.sense_buffer &&
3292 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3293 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
3294 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3295 cmd->se_cmd.scsi_sense_length += sizeof (__be16);
3297 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3298 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3299 data_buf = cmd->sense_buffer;
3300 data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
3303 memset(cmd->sense_buffer +
3304 cmd->se_cmd.scsi_sense_length, 0, padding);
3305 pr_debug("Adding %u bytes of padding to"
3306 " SENSE.\n", padding);
3309 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3311 cmd->se_cmd.scsi_sense_length);
3314 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
3318 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3320 switch (se_tmr->response) {
3321 case TMR_FUNCTION_COMPLETE:
3322 return ISCSI_TMF_RSP_COMPLETE;
3323 case TMR_TASK_DOES_NOT_EXIST:
3324 return ISCSI_TMF_RSP_NO_TASK;
3325 case TMR_LUN_DOES_NOT_EXIST:
3326 return ISCSI_TMF_RSP_NO_LUN;
3327 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3328 return ISCSI_TMF_RSP_NOT_SUPPORTED;
3329 case TMR_FUNCTION_REJECTED:
3331 return ISCSI_TMF_RSP_REJECTED;
3336 iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3337 struct iscsi_tm_rsp *hdr)
3339 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3341 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3342 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3343 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3344 hdr->itt = cmd->init_task_tag;
3345 cmd->stat_sn = conn->stat_sn++;
3346 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3348 iscsit_increment_maxcmdsn(cmd, conn->sess);
3349 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3350 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3352 pr_debug("Built Task Management Response ITT: 0x%08x,"
3353 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3354 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3356 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
3359 iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3361 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3363 iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3365 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3368 #define SENDTARGETS_BUF_LIMIT 32768U
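/*
 * Generate the SendTargets discovery payload.  Each Text Response is
 * capped at min(MaxRecvDataSegmentLength, SENDTARGETS_BUF_LIMIT) bytes;
 * skip_bytes resumes a previously truncated listing and *completed tells
 * the caller whether the whole listing fit into this response.
 */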
3371 iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
3372 enum iscsit_transport_type network_transport,
3373 int skip_bytes, bool *completed)
3375 char *payload = NULL;
3376 struct iscsit_conn *conn = cmd->conn;
3377 struct iscsi_portal_group *tpg;
3378 struct iscsi_tiqn *tiqn;
3379 struct iscsi_tpg_np *tpg_np;
3380 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3381 int target_name_printed;
3382 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3383 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3386 buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
3387 SENDTARGETS_BUF_LIMIT);
3389 payload = kzalloc(buffer_len, GFP_KERNEL);
3394 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
3397 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
3398 text_ptr = strchr(text_in, '=');
3400 pr_err("Unable to locate '=' string in text_in:"
3406 * Skip over the '=' character.
3411 spin_lock(&tiqn_lock);
3412 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3413 if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
3414 strcmp(tiqn->tiqn, text_ptr)) {
3418 target_name_printed = 0;
3420 spin_lock(&tiqn->tiqn_tpg_lock);
3421 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3423 /* If demo_mode_discovery=0 and generate_node_acls=0
3424 * (demo mode disabled) do not return
3425 * TargetName+TargetAddress unless a NodeACL exists.
3428 if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3429 (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3430 (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
3431 cmd->conn->sess->sess_ops->InitiatorName))) {
3435 spin_lock(&tpg->tpg_state_lock);
3436 active = (tpg->tpg_state == TPG_STATE_ACTIVE);
3437 spin_unlock(&tpg->tpg_state_lock);
3439 if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
3442 spin_lock(&tpg->tpg_np_lock);
3443 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3445 struct iscsi_np *np = tpg_np->tpg_np;
3446 struct sockaddr_storage *sockaddr;
3448 if (np->np_network_transport != network_transport)
3451 if (!target_name_printed) {
3452 len = sprintf(buf, "TargetName=%s",
3456 if ((len + payload_len) > buffer_len) {
3457 spin_unlock(&tpg->tpg_np_lock);
3458 spin_unlock(&tiqn->tiqn_tpg_lock);
3463 if (skip_bytes && len <= skip_bytes) {
3466 memcpy(payload + payload_len, buf, len);
3468 target_name_printed = 1;
3469 if (len > skip_bytes)
3474 if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr))
3475 sockaddr = &conn->local_sockaddr;
3477 sockaddr = &np->np_sockaddr;
3479 len = sprintf(buf, "TargetAddress="
3485 if ((len + payload_len) > buffer_len) {
3486 spin_unlock(&tpg->tpg_np_lock);
3487 spin_unlock(&tiqn->tiqn_tpg_lock);
3492 if (skip_bytes && len <= skip_bytes) {
3495 memcpy(payload + payload_len, buf, len);
3497 if (len > skip_bytes)
3501 spin_unlock(&tpg->tpg_np_lock);
3503 spin_unlock(&tiqn->tiqn_tpg_lock);
3510 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
3513 spin_unlock(&tiqn_lock);
3515 cmd->buf_ptr = payload;
3521 iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3522 struct iscsi_text_rsp *hdr,
3523 enum iscsit_transport_type network_transport)
3525 int text_length, padding;
3526 bool completed = true;
3528 text_length = iscsit_build_sendtargets_response(cmd, network_transport,
3529 cmd->read_data_done,
3531 if (text_length < 0)
3535 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3537 hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
3538 cmd->read_data_done += text_length;
3539 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
3540 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
3542 hdr->opcode = ISCSI_OP_TEXT_RSP;
3543 padding = ((-text_length) & 3);
3544 hton24(hdr->dlength, text_length);
3545 hdr->itt = cmd->init_task_tag;
3546 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3547 cmd->stat_sn = conn->stat_sn++;
3548 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3550 iscsit_increment_maxcmdsn(cmd, conn->sess);
3552 * Reset maxcmdsn_inc in multi-part text payload exchanges to
3553 * correctly increment MaxCmdSN for each response answering a
3554 * non-immediate text request with a valid CmdSN.
3556 cmd->maxcmdsn_inc = 0;
3557 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3558 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3560 pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
3561 " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
3562 cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
3563 !!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
3564 !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
3566 return text_length + padding;
3568 EXPORT_SYMBOL(iscsit_build_text_rsp);
3570 static int iscsit_send_text_rsp(
3571 struct iscsit_cmd *cmd,
3572 struct iscsit_conn *conn)
3574 struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
3577 text_length = iscsit_build_text_rsp(cmd, conn, hdr,
3578 conn->conn_transport->transport_type);
3579 if (text_length < 0)
3582 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3588 iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3589 struct iscsi_reject *hdr)
3591 hdr->opcode = ISCSI_OP_REJECT;
3592 hdr->reason = cmd->reject_reason;
3593 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3594 hton24(hdr->dlength, ISCSI_HDR_LEN);
3595 hdr->ffffffff = cpu_to_be32(0xffffffff);
3596 cmd->stat_sn = conn->stat_sn++;
3597 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3598 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3599 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3602 EXPORT_SYMBOL(iscsit_build_reject);
3604 static int iscsit_send_reject(
3605 struct iscsit_cmd *cmd,
3606 struct iscsit_conn *conn)
3608 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3610 iscsit_build_reject(cmd, conn, hdr);
3612 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3613 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3615 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3620 void iscsit_thread_get_cpumask(struct iscsit_conn *conn)
3623 cpumask_var_t conn_allowed_cpumask;
3626 * bitmap_id is assigned from iscsit_global->ts_bitmap from
3627 * within iscsit_start_kthreads()
3629 * Here we use bitmap_id to determine which CPU this
3630 * iSCSI connection's RX/TX threads will be scheduled to
3633 if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
3634 ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
3635 for_each_online_cpu(cpu) {
3637 cpumask_set_cpu(cpu, conn->conn_cpumask);
3642 cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
3645 cpumask_clear(conn->conn_cpumask);
3646 ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
3647 for_each_cpu(cpu, conn_allowed_cpumask) {
3649 cpumask_set_cpu(cpu, conn->conn_cpumask);
3650 free_cpumask_var(conn_allowed_cpumask);
3654 free_cpumask_var(conn_allowed_cpumask);
3657 * This should never be reached.
3660 cpumask_setall(conn->conn_cpumask);
3663 static void iscsit_thread_reschedule(struct iscsit_conn *conn)
3666 * If iscsit_global->allowed_cpumask has been modified, reschedule the
3667 * iSCSI connection's RX/TX threads and update conn->allowed_cpumask.
3669 if (!cpumask_equal(iscsit_global->allowed_cpumask,
3670 conn->allowed_cpumask)) {
3671 iscsit_thread_get_cpumask(conn);
3672 conn->conn_tx_reset_cpumask = 1;
3673 conn->conn_rx_reset_cpumask = 1;
3674 cpumask_copy(conn->allowed_cpumask,
3675 iscsit_global->allowed_cpumask);
3679 void iscsit_thread_check_cpumask(
3680 struct iscsit_conn *conn,
3681 struct task_struct *p,
3685 * The TX and RX threads may call iscsit_thread_check_cpumask()
3686 * at the same time. The RX thread might be faster and return from
3687 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
3688 * Then the TX thread sets it back to 1.
3689 * The next time the RX thread loops, it sees conn_rx_reset_cpumask
3690 * set to 1 and calls set_cpus_allowed_ptr() again and set it to 0.
3692 iscsit_thread_reschedule(conn);
3695 * mode == 1 signals iscsi_target_tx_thread() usage.
3696 * mode == 0 signals iscsi_target_rx_thread() usage.
3699 if (!conn->conn_tx_reset_cpumask)
3702 if (!conn->conn_rx_reset_cpumask)
3707 * Update the CPU mask for this single kthread so that
3708 * both TX and RX kthreads are scheduled to run on the
3711 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3713 conn->conn_tx_reset_cpumask = 0;
3715 conn->conn_rx_reset_cpumask = 0;
3717 EXPORT_SYMBOL(iscsit_thread_check_cpumask);
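/*
 * Dispatch a single immediate queue state for the tx_thread: R2Ts,
 * unsolicited NOPINs and command removal are handled here, while any
 * unexpected state is treated as a protocol error.
 */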
3720 iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3725 case ISTATE_SEND_R2T:
3726 ret = iscsit_send_r2t(cmd, conn);
3731 spin_lock_bh(&conn->cmd_lock);
3732 list_del_init(&cmd->i_conn_node);
3733 spin_unlock_bh(&conn->cmd_lock);
3735 iscsit_free_cmd(cmd, false);
3737 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3738 iscsit_mod_nopin_response_timer(conn);
3739 ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
3743 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3744 ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
3749 pr_err("Unknown Opcode: 0x%02x ITT:"
3750 " 0x%08x, i_state: %d on CID: %hu\n",
3751 cmd->iscsi_opcode, cmd->init_task_tag, state,
3761 EXPORT_SYMBOL(iscsit_immediate_queue);
3764 iscsit_handle_immediate_queue(struct iscsit_conn *conn)
3766 struct iscsit_transport *t = conn->conn_transport;
3767 struct iscsi_queue_req *qr;
3768 struct iscsit_cmd *cmd;
3772 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3773 atomic_set(&conn->check_immediate_queue, 0);
3776 kmem_cache_free(lio_qr_cache, qr);
3778 ret = t->iscsit_immediate_queue(conn, cmd, state);
3787 iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3793 case ISTATE_SEND_DATAIN:
3794 ret = iscsit_send_datain(cmd, conn);
3799 goto check_rsp_state;
3800 else if (ret == 1) {
3802 spin_lock_bh(&cmd->istate_lock);
3803 cmd->i_state = ISTATE_SENT_STATUS;
3804 spin_unlock_bh(&cmd->istate_lock);
3806 if (atomic_read(&conn->check_immediate_queue))
3810 } else if (ret == 2) {
3811 /* Still must send status,
3812 SCF_TRANSPORT_TASK_SENSE was set */
3813 spin_lock_bh(&cmd->istate_lock);
3814 cmd->i_state = ISTATE_SEND_STATUS;
3815 spin_unlock_bh(&cmd->istate_lock);
3816 state = ISTATE_SEND_STATUS;
3817 goto check_rsp_state;
3821 case ISTATE_SEND_STATUS:
3822 case ISTATE_SEND_STATUS_RECOVERY:
3823 ret = iscsit_send_response(cmd, conn);
3825 case ISTATE_SEND_LOGOUTRSP:
3826 ret = iscsit_send_logout(cmd, conn);
3828 case ISTATE_SEND_ASYNCMSG:
3829 ret = iscsit_send_conn_drop_async_message(
3832 case ISTATE_SEND_NOPIN:
3833 ret = iscsit_send_nopin(cmd, conn);
3835 case ISTATE_SEND_REJECT:
3836 ret = iscsit_send_reject(cmd, conn);
3838 case ISTATE_SEND_TASKMGTRSP:
3839 ret = iscsit_send_task_mgt_rsp(cmd, conn);
3842 ret = iscsit_tmr_post_handler(cmd, conn);
3844 iscsit_fall_back_to_erl0(conn->sess);
3846 case ISTATE_SEND_TEXTRSP:
3847 ret = iscsit_send_text_rsp(cmd, conn);
3850 pr_err("Unknown Opcode: 0x%02x ITT:"
3851 " 0x%08x, i_state: %d on CID: %hu\n",
3852 cmd->iscsi_opcode, cmd->init_task_tag,
3860 case ISTATE_SEND_LOGOUTRSP:
3861 if (!iscsit_logout_post_handler(cmd, conn))
3864 case ISTATE_SEND_STATUS:
3865 case ISTATE_SEND_ASYNCMSG:
3866 case ISTATE_SEND_NOPIN:
3867 case ISTATE_SEND_STATUS_RECOVERY:
3868 case ISTATE_SEND_TEXTRSP:
3869 case ISTATE_SEND_TASKMGTRSP:
3870 case ISTATE_SEND_REJECT:
3871 spin_lock_bh(&cmd->istate_lock);
3872 cmd->i_state = ISTATE_SENT_STATUS;
3873 spin_unlock_bh(&cmd->istate_lock);
3876 pr_err("Unknown Opcode: 0x%02x ITT:"
3877 " 0x%08x, i_state: %d on CID: %hu\n",
3878 cmd->iscsi_opcode, cmd->init_task_tag,
3879 cmd->i_state, conn->cid);
3883 if (atomic_read(&conn->check_immediate_queue))
3891 EXPORT_SYMBOL(iscsit_response_queue);
3893 static int iscsit_handle_response_queue(struct iscsit_conn *conn)
3895 struct iscsit_transport *t = conn->conn_transport;
3896 struct iscsi_queue_req *qr;
3897 struct iscsit_cmd *cmd;
3901 while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3904 kmem_cache_free(lio_qr_cache, qr);
3906 ret = t->iscsit_response_queue(conn, cmd, state);
3907 if (ret == 1 || ret < 0)
3914 int iscsi_target_tx_thread(void *arg)
3917 struct iscsit_conn *conn = arg;
3918 bool conn_freed = false;
3921 * Allow ourselves to be interrupted by SIGINT so that a
3922 * connection recovery / failure event can be triggered externally.
3924 allow_signal(SIGINT);
3926 while (!kthread_should_stop()) {
3928 * Ensure that both TX and RX per connection kthreads
3929 * are scheduled to run on the same CPU.
3931 iscsit_thread_check_cpumask(conn, current, 1);
3933 wait_event_interruptible(conn->queues_wq,
3934 !iscsit_conn_all_queues_empty(conn));
3936 if (signal_pending(current))
3940 ret = iscsit_handle_immediate_queue(conn);
3944 ret = iscsit_handle_response_queue(conn);
3947 } else if (ret == -ECONNRESET) {
3950 } else if (ret < 0) {
3957 * Avoid the normal connection failure code-path if this connection
3958 * is still within LOGIN mode, and iscsi_np process context is
3959 * responsible for cleaning up the early connection failure.
3961 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
3962 iscsit_take_action_for_connection_exit(conn, &conn_freed);
3965 while (!kthread_should_stop()) {
3972 static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf)
3974 struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
3975 struct iscsit_cmd *cmd;
3978 switch (hdr->opcode & ISCSI_OPCODE_MASK) {
3979 case ISCSI_OP_SCSI_CMD:
3980 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3984 ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3986 case ISCSI_OP_SCSI_DATA_OUT:
3987 ret = iscsit_handle_data_out(conn, buf);
3989 case ISCSI_OP_NOOP_OUT:
3991 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3992 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3996 ret = iscsit_handle_nop_out(conn, cmd, buf);
3998 case ISCSI_OP_SCSI_TMFUNC:
3999 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4003 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
4006 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
4007 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
4011 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4016 ret = iscsit_handle_text_cmd(conn, cmd, buf);
4018 case ISCSI_OP_LOGOUT:
4019 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4023 ret = iscsit_handle_logout_cmd(conn, cmd, buf);
4025 wait_for_completion_timeout(&conn->conn_logout_comp,
4026 SECONDS_FOR_LOGOUT_COMP * HZ);
4028 case ISCSI_OP_SNACK:
4029 ret = iscsit_handle_snack(conn, buf);
4032 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
4033 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
4034 pr_err("Cannot recover from unknown"
4035 " opcode while ERL=0, closing iSCSI connection.\n");
4038 pr_err("Unable to recover from unknown opcode while OFMarker=No,"
4039 " closing iSCSI connection.\n");
4046 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
4049 static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
4053 spin_lock_bh(&conn->state_lock);
4054 ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
4055 spin_unlock_bh(&conn->state_lock);
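/*
 * RX loop for the traditional iscsi/tcp transport: read the 48-byte BHS
 * plus any AHS and the optional HeaderDigest, verify the header CRC32C,
 * reject opcodes other than Text and Logout while in a Discovery
 * session, and dispatch the PDU via iscsi_target_rx_opcode().
 */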
4060 static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
4063 u8 *buffer, *tmp_buf, opcode;
4064 u32 checksum = 0, digest = 0;
4065 struct iscsi_hdr *hdr;
4068 buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
4072 while (!kthread_should_stop()) {
4074 * Ensure that both TX and RX per connection kthreads
4075 * are scheduled to run on the same CPU.
4077 iscsit_thread_check_cpumask(conn, current, 0);
4079 memset(&iov, 0, sizeof(struct kvec));
4081 iov.iov_base = buffer;
4082 iov.iov_len = ISCSI_HDR_LEN;
4084 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
4085 if (ret != ISCSI_HDR_LEN) {
4086 iscsit_rx_thread_wait_for_tcp(conn);
4090 hdr = (struct iscsi_hdr *) buffer;
4092 iov.iov_len = hdr->hlength * 4;
4093 tmp_buf = krealloc(buffer,
4094 ISCSI_HDR_LEN + iov.iov_len,
4100 iov.iov_base = &buffer[ISCSI_HDR_LEN];
4102 ret = rx_data(conn, &iov, 1, iov.iov_len);
4103 if (ret != iov.iov_len) {
4104 iscsit_rx_thread_wait_for_tcp(conn);
4109 if (conn->conn_ops->HeaderDigest) {
4110 iov.iov_base = &digest;
4111 iov.iov_len = ISCSI_CRC_LEN;
4113 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
4114 if (ret != ISCSI_CRC_LEN) {
4115 iscsit_rx_thread_wait_for_tcp(conn);
4119 iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
4120 ISCSI_HDR_LEN, 0, NULL,
4123 if (digest != checksum) {
4124 pr_err("HeaderDigest CRC32C failed,"
4125 " received 0x%08x, computed 0x%08x\n",
4128 * Set the PDU to 0xff so it will intentionally
4129 * hit default in the switch below.
4131 memset(buffer, 0xff, ISCSI_HDR_LEN);
4132 atomic_long_inc(&conn->sess->conn_digest_errors);
4134 pr_debug("Got HeaderDigest CRC32C"
4135 " 0x%08x\n", checksum);
4139 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
4142 opcode = buffer[0] & ISCSI_OPCODE_MASK;
4144 if (conn->sess->sess_ops->SessionType &&
4145 ((!(opcode & ISCSI_OP_TEXT)) ||
4146 (!(opcode & ISCSI_OP_LOGOUT)))) {
4147 pr_err("Received illegal iSCSI Opcode: 0x%02x"
4148 " while in Discovery Session, rejecting.\n", opcode);
4149 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
4154 ret = iscsi_target_rx_opcode(conn, buffer);
4162 int iscsi_target_rx_thread(void *arg)
4165 struct iscsit_conn *conn = arg;
4166 bool conn_freed = false;
4169 * Allow ourselves to be interrupted by SIGINT so that a
4170 * connection recovery / failure event can be triggered externally.
4172 allow_signal(SIGINT);
4174 * Wait for iscsi_post_login_handler() to complete before allowing
4175 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4177 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4178 if (rc < 0 || iscsi_target_check_conn_state(conn))
4181 if (!conn->conn_transport->iscsit_get_rx_pdu)
4184 conn->conn_transport->iscsit_get_rx_pdu(conn);
4186 if (!signal_pending(current))
4187 atomic_set(&conn->transport_failed, 1);
4188 iscsit_take_action_for_connection_exit(conn, &conn_freed);
4192 while (!kthread_should_stop()) {
4200 static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
4202 LIST_HEAD(tmp_list);
4203 struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL;
4204 struct iscsit_session *sess = conn->sess;
4206 * We expect this function to only ever be called from either RX or TX
4207 * thread context via iscsit_close_connection() once the other context
4208 * has been reset and returned to its sleeping pre-handler state.
4210 spin_lock_bh(&conn->cmd_lock);
4211 list_splice_init(&conn->conn_cmd_list, &tmp_list);
4213 list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4214 struct se_cmd *se_cmd = &cmd->se_cmd;
4216 if (!se_cmd->se_tfo)
4219 spin_lock_irq(&se_cmd->t_state_lock);
4220 if (se_cmd->transport_state & CMD_T_ABORTED) {
4221 if (!(se_cmd->transport_state & CMD_T_TAS))
4223 * LIO's abort path owns the cleanup for this,
4224 * so put it back on the list and let
4225 * aborted_task handle it.
4227 list_move_tail(&cmd->i_conn_node,
4228 &conn->conn_cmd_list);
4230 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
4233 if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
4235 * We never submitted the cmd to LIO core, so we have
4236 * to tell LIO to perform the completion process.
4238 spin_unlock_irq(&se_cmd->t_state_lock);
4239 target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED);
4242 spin_unlock_irq(&se_cmd->t_state_lock);
4244 spin_unlock_bh(&conn->cmd_lock);
4246 list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4247 list_del_init(&cmd->i_conn_node);
4249 iscsit_increment_maxcmdsn(cmd, sess);
4250 iscsit_free_cmd(cmd, true);
4255 * Wait on commands that were cleaned up via the aborted_task path.
4256 * LLDs that implement iscsit_wait_conn will already have waited for
4259 if (!conn->conn_transport->iscsit_wait_conn) {
4260 target_stop_cmd_counter(conn->cmd_cnt);
4261 target_wait_for_cmds(conn->cmd_cnt);
4265 static void iscsit_stop_timers_for_cmds(
4266 struct iscsit_conn *conn)
4268 struct iscsit_cmd *cmd;
4270 spin_lock_bh(&conn->cmd_lock);
4271 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
4272 if (cmd->data_direction == DMA_TO_DEVICE)
4273 iscsit_stop_dataout_timer(cmd);
4275 spin_unlock_bh(&conn->cmd_lock);
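/*
 * Tear down a single iSCSI connection: stop the peer RX/TX kthread,
 * release per-command timers, queues and outstanding commands (or
 * prepare them for reallegiance when connection recovery is active),
 * free the digest contexts and socket, and finally decide whether the
 * owning session must be failed, reinstated or closed.
 */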
4278 int iscsit_close_connection(
4279 struct iscsit_conn *conn)
4281 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
4282 struct iscsit_session *sess = conn->sess;
4284 pr_debug("Closing iSCSI connection CID %hu on SID:"
4285 " %u\n", conn->cid, sess->sid);
4287 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
4288 * case just in case the RX Thread in iscsi_target_rx_opcode() is
4289 * sleeping and the logout response never got sent because the
4290 * connection failed.
4292 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4293 * to signal logout response TX interrupt completion. Go ahead and skip
4294 * this for iser since isert_rx_opcode() does not wait on logout failure,
4295 * and to avoid iscsit_conn pointer dereference in iser-target code.
4297 if (!conn->conn_transport->rdma_shutdown)
4298 complete(&conn->conn_logout_comp);
4300 if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
4301 if (conn->tx_thread &&
4302 cmpxchg(&conn->tx_thread_active, true, false)) {
4303 send_sig(SIGINT, conn->tx_thread, 1);
4304 kthread_stop(conn->tx_thread);
4306 } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
4307 if (conn->rx_thread &&
4308 cmpxchg(&conn->rx_thread_active, true, false)) {
4309 send_sig(SIGINT, conn->rx_thread, 1);
4310 kthread_stop(conn->rx_thread);
4314 spin_lock(&iscsit_global->ts_bitmap_lock);
4315 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
4317 spin_unlock(&iscsit_global->ts_bitmap_lock);
4319 iscsit_stop_timers_for_cmds(conn);
4320 iscsit_stop_nopin_response_timer(conn);
4321 iscsit_stop_nopin_timer(conn);
4323 if (conn->conn_transport->iscsit_wait_conn)
4324 conn->conn_transport->iscsit_wait_conn(conn);
4327 * During Connection recovery drop unacknowledged out of order
4328 * commands for this connection, and prepare the other commands
4331 * During normal operation clear the out of order commands (but
4332 * do not free the struct iscsi_ooo_cmdsn's) and release all
4333 * struct iscsit_cmds.
4335 if (atomic_read(&conn->connection_recovery)) {
4336 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
4337 iscsit_prepare_cmds_for_reallegiance(conn);
4339 iscsit_clear_ooo_cmdsns_for_conn(conn);
4340 iscsit_release_commands_from_conn(conn);
4342 iscsit_free_queue_reqs_for_conn(conn);
4345 * Handle decrementing session or connection usage count if
4346 * a logout response was not able to be sent because the
4347 * connection failed. Fall back to Session Recovery here.
4349 if (atomic_read(&conn->conn_logout_remove)) {
4350 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
4351 iscsit_dec_conn_usage_count(conn);
4352 iscsit_dec_session_usage_count(sess);
4354 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
4355 iscsit_dec_conn_usage_count(conn);
4357 atomic_set(&conn->conn_logout_remove, 0);
4358 atomic_set(&sess->session_reinstatement, 0);
4359 atomic_set(&sess->session_fall_back_to_erl0, 1);
4362 spin_lock_bh(&sess->conn_lock);
4363 list_del(&conn->conn_list);
4366 * Attempt to let the Initiator know this connection failed by
4367 * sending a Connection Dropped Async Message on another
4368 * active connection.
4370 if (atomic_read(&conn->connection_recovery))
4371 iscsit_build_conn_drop_async_message(conn);
4373 spin_unlock_bh(&sess->conn_lock);
4376 * If connection reinstatement is being performed on this connection,
4377 * up the connection reinstatement semaphore that is being blocked on
4378 * in iscsit_cause_connection_reinstatement().
4380 spin_lock_bh(&conn->state_lock);
4381 if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4382 spin_unlock_bh(&conn->state_lock);
4383 complete(&conn->conn_wait_comp);
4384 wait_for_completion(&conn->conn_post_wait_comp);
4385 spin_lock_bh(&conn->state_lock);
4389 * If connection reinstatement is being performed on this connection
4390 * by receiving a REMOVECONNFORRECOVERY logout request, up the
4391 * connection wait rcfr semaphore that is being blocked on
4392 * in iscsit_connection_reinstatement_rcfr().
4394 if (atomic_read(&conn->connection_wait_rcfr)) {
4395 spin_unlock_bh(&conn->state_lock);
4396 complete(&conn->conn_wait_rcfr_comp);
4397 wait_for_completion(&conn->conn_post_wait_comp);
4398 spin_lock_bh(&conn->state_lock);
4400 atomic_set(&conn->connection_reinstatement, 1);
4401 spin_unlock_bh(&conn->state_lock);
	/*
	 * If any other processes are accessing this connection pointer we
	 * must wait until they have completed.
	 */
	iscsit_check_conn_usage_count(conn);

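	/*
	 * If header/data digests were negotiated, both digest request
	 * handles were allocated at login time on a single crc32c
	 * transform, so the underlying tfm is released only once, via the
	 * RX request, after both requests have been freed.
	 */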
	ahash_request_free(conn->conn_tx_hash);
	if (conn->conn_rx_hash) {
		struct crypto_ahash *tfm;

		tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
		ahash_request_free(conn->conn_rx_hash);
		crypto_free_ahash(tfm);
	}

	if (conn->sock)
		sock_release(conn->sock);

	if (conn->conn_transport->iscsit_free_conn)
		conn->conn_transport->iscsit_free_conn(conn);

	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
	conn->conn_state = TARG_CONN_STATE_FREE;
	iscsit_free_conn(conn);

	spin_lock_bh(&sess->conn_lock);
	atomic_dec(&sess->nconn);
	pr_debug("Decremented iSCSI connection count to %d from node:"
		" %s\n", atomic_read(&sess->nconn),
		sess->sess_ops->InitiatorName);
	/*
	 * Make sure that if one connection fails in a non ERL=2 iSCSI
	 * Session that they all fail.
	 */
	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
	     !atomic_read(&sess->session_logout))
		atomic_set(&sess->session_fall_back_to_erl0, 1);

	/*
	 * If this was not the last connection in the session, and we are
	 * performing session reinstatement or falling back to ERL=0, call
	 * iscsit_stop_session() without sleeping to shut down the other
	 * active connections.
	 */
	if (atomic_read(&sess->nconn)) {
		if (!atomic_read(&sess->session_reinstatement) &&
		    !atomic_read(&sess->session_fall_back_to_erl0)) {
			spin_unlock_bh(&sess->conn_lock);
			return 0;
		}
		if (!atomic_read(&sess->session_stop_active)) {
			atomic_set(&sess->session_stop_active, 1);
			spin_unlock_bh(&sess->conn_lock);
			iscsit_stop_session(sess, 0, 0);
			return 0;
		}
		spin_unlock_bh(&sess->conn_lock);
		return 0;
	}

	/*
	 * If this was the last connection in the session and one of the
	 * following is occurring:
	 *
	 * Session Reinstatement is not being performed, and we are falling
	 * back to ERL=0, call iscsit_close_session().
	 *
	 * Session Logout was requested. iscsit_close_session() will be called
	 * elsewhere.
	 *
	 * Session Continuation is not being performed, start the Time2Retain
	 * handler and check if sleep_on_sess_wait_sem is active.
	 */
	if (!atomic_read(&sess->session_reinstatement) &&
	     atomic_read(&sess->session_fall_back_to_erl0)) {
		spin_unlock_bh(&sess->conn_lock);
		complete_all(&sess->session_wait_comp);
		iscsit_close_session(sess, true);

		return 0;
	} else if (atomic_read(&sess->session_logout)) {
		pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
		sess->session_state = TARG_SESS_STATE_FREE;

		if (atomic_read(&sess->session_close)) {
			spin_unlock_bh(&sess->conn_lock);
			complete_all(&sess->session_wait_comp);
			iscsit_close_session(sess, true);
		} else {
			spin_unlock_bh(&sess->conn_lock);
		}

		return 0;
	} else {
		pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
		sess->session_state = TARG_SESS_STATE_FAILED;

		if (!atomic_read(&sess->session_continuation))
			iscsit_start_time2retain_handler(sess);

		if (atomic_read(&sess->session_close)) {
			spin_unlock_bh(&sess->conn_lock);
			complete_all(&sess->session_wait_comp);
			iscsit_close_session(sess, true);
		} else {
			spin_unlock_bh(&sess->conn_lock);
		}

		return 0;
	}
}

/*
 * If the iSCSI Session for the iSCSI Initiator Node exists,
 * forcefully shut down the iSCSI NEXUS.
 */
int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
{
	struct iscsi_portal_group *tpg = sess->tpg;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	if (atomic_read(&sess->nconn)) {
		pr_err("%d connection(s) still exist for iSCSI session"
			" to %s\n", atomic_read(&sess->nconn),
			sess->sess_ops->InitiatorName);
		BUG();
	}

	spin_lock_bh(&se_tpg->session_lock);
	atomic_set(&sess->session_logout, 1);
	atomic_set(&sess->session_reinstatement, 1);
	iscsit_stop_time2retain_timer(sess);
	spin_unlock_bh(&se_tpg->session_lock);

	if (sess->sess_ops->ErrorRecoveryLevel == 2)
		iscsit_free_connection_recovery_entries(sess);

	/*
	 * transport_deregister_session_configfs() will clear the
	 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process
	 * context can be setting it again with __transport_register_session()
	 * in iscsi_post_login_handler() after iscsit_stop_session() completes
	 * in iscsi_np context.
	 */
	transport_deregister_session_configfs(sess->se_sess);

	/*
	 * If any other processes are accessing this session pointer we must
	 * wait until they have completed. If we are in an interrupt (the
	 * time2retain handler) and hold an active session usage count we
	 * restart the timer and exit.
	 */
	if (iscsit_check_session_usage_count(sess, can_sleep)) {
		atomic_set(&sess->session_logout, 0);
		iscsit_start_time2retain_handler(sess);
		return 0;
	}

	transport_deregister_session(sess->se_sess);

	iscsit_free_all_ooo_cmdsns(sess);

	spin_lock_bh(&se_tpg->session_lock);
	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
	sess->session_state = TARG_SESS_STATE_FREE;
	pr_debug("Released iSCSI session from node: %s\n",
			sess->sess_ops->InitiatorName);
	tpg->nsessions--;
	if (tpg->tpg_tiqn)
		tpg->tpg_tiqn->tiqn_nsessions--;

	pr_debug("Decremented number of active iSCSI Sessions on"
		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);

	ida_free(&sess_ida, sess->session_index);
	kfree(sess->sess_ops);
	sess->sess_ops = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	kfree(sess);
	return 0;
}

static void iscsit_logout_post_handler_closesession(
	struct iscsit_conn *conn)
{
	struct iscsit_session *sess = conn->sess;
	int sleep = 1;
	/*
	 * Traditional iscsi/tcp will invoke this logic from TX thread
	 * context during session logout, so clear tx_thread_active and
	 * sleep if iscsit_close_connection() has not already occurred.
	 *
	 * Since iser-target invokes this logic from its own workqueue,
	 * always sleep waiting for RX/TX thread shutdown to complete
	 * within iscsit_close_connection().
	 */
	if (!conn->conn_transport->rdma_shutdown) {
		sleep = cmpxchg(&conn->tx_thread_active, true, false);
		if (!sleep)
			return;
	}

	atomic_set(&conn->conn_logout_remove, 0);
	complete(&conn->conn_logout_comp);

	iscsit_dec_conn_usage_count(conn);
	atomic_set(&sess->session_close, 1);
	iscsit_stop_session(sess, sleep, sleep);
	iscsit_dec_session_usage_count(sess);
}

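/*
 * Same pattern as the close-session handler above: for non-RDMA transports
 * the cmpxchg() on tx_thread_active decides whether this context owns the
 * teardown (and therefore whether the reinstatement below should sleep);
 * only the scope differs, as a same-CID logout reinstates just this
 * connection instead of stopping the whole session.
 */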
static void iscsit_logout_post_handler_samecid(
	struct iscsit_conn *conn)
{
	int sleep = 1;

	if (!conn->conn_transport->rdma_shutdown) {
		sleep = cmpxchg(&conn->tx_thread_active, true, false);
		if (!sleep)
			return;
	}

	atomic_set(&conn->conn_logout_remove, 0);
	complete(&conn->conn_logout_comp);

	iscsit_cause_connection_reinstatement(conn, sleep);
	iscsit_dec_conn_usage_count(conn);
}

static void iscsit_logout_post_handler_diffcid(
	struct iscsit_conn *conn,
	u16 cid)
{
	struct iscsit_conn *l_conn;
	struct iscsit_session *sess = conn->sess;
	bool conn_found = false;

	if (!sess)
		return;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
		if (l_conn->cid == cid) {
			iscsit_inc_conn_usage_count(l_conn);
			conn_found = true;
			break;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	if (!conn_found)
		return;

	if (l_conn->sock)
		l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);

	spin_lock_bh(&l_conn->state_lock);
	pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
	l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
	spin_unlock_bh(&l_conn->state_lock);

	iscsit_cause_connection_reinstatement(l_conn, 1);
	iscsit_dec_conn_usage_count(l_conn);
}

/*
 * Return of 0 causes the TX thread to restart.
 */
int iscsit_logout_post_handler(
	struct iscsit_cmd *cmd,
	struct iscsit_conn *conn)
{
	int ret = 0;

	switch (cmd->logout_reason) {
	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
		switch (cmd->logout_response) {
		case ISCSI_LOGOUT_SUCCESS:
		case ISCSI_LOGOUT_CLEANUP_FAILED:
		default:
			iscsit_logout_post_handler_closesession(conn);
			break;
		}
		break;
	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
		if (conn->cid == cmd->logout_cid) {
			switch (cmd->logout_response) {
			case ISCSI_LOGOUT_SUCCESS:
			case ISCSI_LOGOUT_CLEANUP_FAILED:
			default:
				iscsit_logout_post_handler_samecid(conn);
				break;
			}
		} else {
			switch (cmd->logout_response) {
			case ISCSI_LOGOUT_SUCCESS:
				iscsit_logout_post_handler_diffcid(conn,
					cmd->logout_cid);
				break;
			case ISCSI_LOGOUT_CID_NOT_FOUND:
			case ISCSI_LOGOUT_CLEANUP_FAILED:
			default:
				break;
			}
		}
		break;
	case ISCSI_LOGOUT_REASON_RECOVERY:
		switch (cmd->logout_response) {
		case ISCSI_LOGOUT_SUCCESS:
		case ISCSI_LOGOUT_CID_NOT_FOUND:
		case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
		case ISCSI_LOGOUT_CLEANUP_FAILED:
		default:
			break;
		}
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iscsit_logout_post_handler);
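
/*
 * Usage sketch for iscsit_logout_post_handler() (hypothetical caller, for
 * illustration only): a transport's response path sends the Logout Response
 * PDU and then consults the return value, e.g.:
 *
 *	if (!iscsit_logout_post_handler(cmd, conn))
 *		goto restart_tx_thread;
 *
 * A return of 1 (logout for recovery) means the connection stays up and the
 * caller keeps servicing its queues.
 */
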
void iscsit_fail_session(struct iscsit_session *sess)
{
	struct iscsit_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
		conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
	}
	spin_unlock_bh(&sess->conn_lock);

	pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
	sess->session_state = TARG_SESS_STATE_FAILED;
}
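
/*
 * session_sleep != 0 means wait on session_wait_comp until the remaining
 * connections have been shut down; connection_sleep != 0 means each
 * connection reinstatement below is performed synchronously, with the
 * connection's usage count pinned across the sess->conn_lock drop.
 */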
void iscsit_stop_session(
	struct iscsit_session *sess,
	int session_sleep,
	int connection_sleep)
{
	u16 conn_count = atomic_read(&sess->nconn);
	struct iscsit_conn *conn, *conn_tmp = NULL;
	int is_last;

	spin_lock_bh(&sess->conn_lock);

	if (connection_sleep) {
		list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
				conn_list) {
			if (conn_count == 0)
				break;

			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
				is_last = 1;
			} else {
				iscsit_inc_conn_usage_count(conn_tmp);
				is_last = 0;
			}
			iscsit_inc_conn_usage_count(conn);

			spin_unlock_bh(&sess->conn_lock);
			iscsit_cause_connection_reinstatement(conn, 1);
			spin_lock_bh(&sess->conn_lock);

			iscsit_dec_conn_usage_count(conn);
			if (is_last == 0)
				iscsit_dec_conn_usage_count(conn_tmp);
			conn_count--;
		}
	} else {
		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
			iscsit_cause_connection_reinstatement(conn, 0);
	}

	if (session_sleep && atomic_read(&sess->nconn)) {
		spin_unlock_bh(&sess->conn_lock);
		wait_for_completion(&sess->session_wait_comp);
	} else
		spin_unlock_bh(&sess->conn_lock);
}
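
/*
 * Called when a TPG is being disabled or removed: with force == 0 this
 * refuses to touch a TPG that still has active sessions, while force != 0
 * marks every eligible session for reinstatement/ERL=0 fallback and shuts
 * it down via iscsit_stop_session().
 */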
int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
{
	struct iscsit_session *sess;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
	struct se_session *se_sess, *se_sess_tmp;
	LIST_HEAD(free_list);
	int session_count = 0;

	spin_lock_bh(&se_tpg->session_lock);
	if (tpg->nsessions && !force) {
		spin_unlock_bh(&se_tpg->session_lock);
		return -1;
	}

	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
			sess_list) {
		sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;

		spin_lock(&sess->conn_lock);
		if (atomic_read(&sess->session_fall_back_to_erl0) ||
		    atomic_read(&sess->session_logout) ||
		    atomic_read(&sess->session_close) ||
		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
			spin_unlock(&sess->conn_lock);
			continue;
		}
		iscsit_inc_session_usage_count(sess);
		atomic_set(&sess->session_reinstatement, 1);
		atomic_set(&sess->session_fall_back_to_erl0, 1);
		atomic_set(&sess->session_close, 1);
		spin_unlock(&sess->conn_lock);

		list_move_tail(&se_sess->sess_list, &free_list);
	}
	spin_unlock_bh(&se_tpg->session_lock);

	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
		sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;

		list_del_init(&se_sess->sess_list);
		iscsit_stop_session(sess, 1, 1);
		iscsit_dec_session_usage_count(sess);
		session_count++;
	}

	pr_debug("Released %d iSCSI Session(s) from Target Portal"
		" Group: %hu\n", session_count, tpg->tpgt);
	return 0;
}

MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
MODULE_VERSION("4.1.x");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iscsi_target_init_module);
module_exit(iscsi_target_cleanup_module);