@@ -483,9 +483,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
 void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
 	spin_lock_bh(&conn->cmd_lock);
-	if (!list_empty(&cmd->i_conn_node) &&
-	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
-		list_del_init(&cmd->i_conn_node);
+	list_del_init(&cmd->i_conn_node);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	__iscsit_free_cmd(cmd, true);
@@ -4071,7 +4069,8 @@ int iscsi_target_rx_thread(void *arg)
 
 static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 {
-	LIST_HEAD(tmp_list);
+	LIST_HEAD(tmp_cmd_list);
+	LIST_HEAD(tmp_tmr_list);
 	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
 	struct iscsi_session *sess = conn->sess;
 	/*
@@ -4080,21 +4079,57 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 	 * has been reset -> returned sleeping pre-handler state.
 	 */
 	spin_lock_bh(&conn->cmd_lock);
-	list_splice_init(&conn->conn_cmd_list, &tmp_list);
+	list_splice_init(&conn->conn_cmd_list, &tmp_cmd_list);
 
-	list_for_each_entry(cmd, &tmp_list, i_conn_node) {
+	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_cmd_list, i_conn_node) {
 		struct se_cmd *se_cmd = &cmd->se_cmd;
 
 		if (se_cmd->se_tfo != NULL) {
 			spin_lock_irq(&se_cmd->t_state_lock);
 			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
 			spin_unlock_irq(&se_cmd->t_state_lock);
 		}
+
+		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+			list_move_tail(&cmd->i_conn_node, &tmp_tmr_list);
 	}
 	spin_unlock_bh(&conn->cmd_lock);
 
-	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+	/*
+	 * We must wait for TMRs to be processed first. Any commands that were
+	 * aborted by those TMRs will have been freed and removed from the
+	 * tmp_cmd_list once we have finished traversing tmp_tmr_list.
+	 */
+	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_tmr_list, i_conn_node) {
+		struct se_cmd *se_cmd = &cmd->se_cmd;
+
+		spin_lock_bh(&conn->cmd_lock);
 		list_del_init(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		iscsit_increment_maxcmdsn(cmd, sess);
+		pr_debug("%s: freeing TMR icmd 0x%px cmd 0x%px\n",
+			 __func__, cmd, se_cmd);
+		iscsit_free_cmd(cmd, true);
+		pr_debug("%s: TMR freed\n", __func__);
+	}
+
+	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_cmd_list, i_conn_node) {
+		struct se_cmd *se_cmd = &cmd->se_cmd;
+
+		/*
+		 * We shouldn't be freeing any aborted commands here. Those
+		 * commands should be freed by iscsit_aborted_task, and the
+		 * last reference will be released by target_put_cmd_and_wait,
+		 * called from core_tmr_drain_tmr_list or core_tmr_abort_task.
+		 */
+		spin_lock_irq(&se_cmd->t_state_lock);
+		WARN_ON(se_cmd->transport_state & CMD_T_ABORTED);
+		spin_unlock_irq(&se_cmd->t_state_lock);
+
+		spin_lock_bh(&conn->cmd_lock);
+		list_del_init(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_increment_maxcmdsn(cmd, sess);
 		iscsit_free_cmd(cmd, true);