path: root/drivers/scsi/qla2xxx
author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-03-25 03:53:42 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-03-25 03:53:42 -0300
commit     03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch)
tree       fa581f6dc1c0596391690d1f67eceef3af8246dc /drivers/scsi/qla2xxx
parent     d4e493caf788ef44982e131ff9c786546904d934 (diff)
Linux-libre 4.5-gnu
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c      36
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c       19
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h       83
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c      106
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h       18
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c      58
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h     2
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c     188
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c      126
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c      265
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c       165
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c   699
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h    93
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c  114
14 files changed, 1692 insertions(+), 280 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6b942d9e5..6992ebc50 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -824,6 +824,41 @@ static struct bin_attribute sysfs_reset_attr = {
};
static ssize_t
+qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ int type;
+ int rval = 0;
+ port_id_t did;
+
+ type = simple_strtol(buf, NULL, 10);
+
+ did.b.domain = (type & 0x00ff0000) >> 16;
+ did.b.area = (type & 0x0000ff00) >> 8;
+ did.b.al_pa = (type & 0x000000ff);
+
+ ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n",
+ did.b.domain, did.b.area, did.b.al_pa);
+
+ ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
+
+ rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+ return count;
+}
+
+static struct bin_attribute sysfs_issue_logo_attr = {
+ .attr = {
+ .name = "issue_logo",
+ .mode = S_IWUSR,
+ },
+ .size = 0,
+ .write = qla2x00_issue_logo,
+};
+
+static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -937,6 +972,7 @@ static struct sysfs_entry {
{ "vpd", &sysfs_vpd_attr, 1 },
{ "sfp", &sysfs_sfp_attr, 1 },
{ "reset", &sysfs_reset_attr, },
+ { "issue_logo", &sysfs_issue_logo_attr, },
{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
{ NULL },
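
The new issue_logo node parses its input with simple_strtol(buf, NULL, 10) and unpacks the low 24 bits into domain/area/al_pa before handing the D_ID to qla24xx_els_dcmd_iocb(). A minimal user-space sketch of driving the node follows; the sysfs path and the example D_ID are assumptions for illustration only, not something defined by this patch:

/*
 * Write a decimal D_ID to the new "issue_logo" attribute.  The sysfs path
 * below is an assumption (the attribute hangs off the scsi_host's device
 * kobject); adjust it to the qla2xxx host on the local system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned int domain = 0x01, area = 0x02, al_pa = 0x03;	/* example D_ID */
	unsigned int did = (domain << 16) | (area << 8) | al_pa;
	char buf[16];
	int fd, len;

	fd = open("/sys/class/scsi_host/host0/device/issue_logo", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* decimal, as simple_strtol(buf, NULL, 10) in the handler expects */
	len = snprintf(buf, sizeof(buf), "%u", did);
	if (write(fd, buf, len) != len)
		perror("write");
	close(fd);
	return 0;
}

Note that the handler always returns count, so a failure from qla24xx_els_dcmd_iocb() is only visible in the kernel log, not in the write() return value.
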
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34dc9a356..cd0d94ea7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -14,25 +14,24 @@
* | Module Init and Probe | 0x017f | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e-0x0170 |
- * | Mailbox commands | 0x118d | 0x1115-0x1116 |
- * | | | 0x111a-0x111b |
+ * | Mailbox commands | 0x1192 | |
+ * | | | |
* | Device Discovery | 0x2016 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2099-0x20a4 |
- * | Queue Command and IO tracing | 0x3075 | 0x300b |
+ * | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
- * | Async Events | 0x508a | 0x502b-0x502f |
- * | | | 0x5047 |
+ * | Async Events | 0x5089 | 0x502b-0x502f |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
* | | | 0x507b,0x505f |
* | Timer Routines | 0x6012 | |
- * | User Space Interactions | 0x70e2 | 0x7018,0x702e |
+ * | User Space Interactions | 0x70e6 | 0x7018,0x702e |
* | | | 0x7020,0x7024 |
* | | | 0x7039,0x7045 |
* | | | 0x7073-0x7075 |
@@ -60,15 +59,11 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc00c | |
- * | Misc | 0xd300 | 0xd016-0xd017 |
- * | | | 0xd021,0xd024 |
- * | | | 0xd025,0xd029 |
- * | | | 0xd02a,0xd02e |
- * | | | 0xd031-0xd0ff |
+ * | Misc | 0xd301 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
* | Target Mode | 0xe080 | |
- * | Target Mode Management | 0xf096 | 0xf002 |
+ * | Target Mode Management | 0xf09b | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000d | |
* ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 388d79088..9872f3429 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -259,7 +259,7 @@
#define LOOP_DOWN_TIME 255 /* 240 */
#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
-#define DEFAULT_OUTSTANDING_COMMANDS 1024
+#define DEFAULT_OUTSTANDING_COMMANDS 4096
#define MIN_OUTSTANDING_COMMANDS 128
/* ISP request and response entry counts (37-65535) */
@@ -267,11 +267,13 @@
#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */
#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
#define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_83XX 4096 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
+#define EXTENDED_EXCH_ENTRY_CNT 32768 /* Entries for offload case */
struct req_que;
struct qla_tgt_sess;
@@ -309,6 +311,14 @@ struct srb_cmd {
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
+struct els_logo_payload {
+ uint8_t opcode;
+ uint8_t rsvd[3];
+ uint8_t s_id[3];
+ uint8_t rsvd1[1];
+ uint8_t wwpn[WWN_SIZE];
+};
+
/*
* SRB extensions.
*/
@@ -322,6 +332,15 @@ struct srb_iocb {
uint16_t data[2];
} logio;
struct {
+#define ELS_DCMD_TIMEOUT 20
+#define ELS_DCMD_LOGO 0x5
+ uint32_t flags;
+ uint32_t els_cmd;
+ struct completion comp;
+ struct els_logo_payload *els_logo_pyld;
+ dma_addr_t els_logo_pyld_dma;
+ } els_logo;
+ struct {
/*
* Values for flags field below are as
* defined in tsk_mgmt_entry struct
@@ -382,7 +401,7 @@ struct srb_iocb {
#define SRB_FXIOCB_DCMD 10
#define SRB_FXIOCB_BCMD 11
#define SRB_ABT_CMD 12
-
+#define SRB_ELS_DCMD 13
typedef struct srb {
atomic_t ref_count;
@@ -891,6 +910,7 @@ struct mbx_cmd_32 {
#define MBC_DISABLE_VI 0x24 /* Disable VI operation. */
#define MBC_ENABLE_VI 0x25 /* Enable VI operation. */
#define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */
+#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT 0x34 /* Memory Offload ctrl/Stat*/
#define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */
#define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */
#define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */
@@ -2695,11 +2715,16 @@ struct isp_operations {
struct scsi_qla_host;
+
+#define QLA83XX_RSPQ_MSIX_ENTRY_NUMBER 1 /* refer to qla83xx_msix_entries */
+
struct qla_msix_entry {
int have_irq;
uint32_t vector;
uint16_t entry;
struct rsp_que *rsp;
+ struct irq_affinity_notify irq_notify;
+ int cpuid;
};
#define WATCH_INTERVAL 1 /* number of seconds */
@@ -2910,12 +2935,15 @@ struct qlt_hw_data {
uint32_t num_qfull_cmds_dropped;
spinlock_t q_full_lock;
uint32_t leak_exchg_thresh_hold;
+ spinlock_t sess_lock;
+ int rspq_vector_cpuid;
+ spinlock_t atio_lock ____cacheline_aligned;
};
#define MAX_QFULL_CMDS_ALLOC 8192
#define Q_FULL_THRESH_HOLD_PERCENT 90
#define Q_FULL_THRESH_HOLD(ha) \
- ((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
+ ((ha->cur_fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
@@ -2962,10 +2990,12 @@ struct qla_hw_data {
uint32_t isp82xx_no_md_cap:1;
uint32_t host_shutting_down:1;
uint32_t idc_compl_status:1;
-
uint32_t mr_reset_hdlr_active:1;
uint32_t mr_intr_valid:1;
+
uint32_t fawwpn_enabled:1;
+ uint32_t exlogins_enabled:1;
+ uint32_t exchoffld_enabled:1;
/* 35 bits */
} flags;
@@ -3237,6 +3267,21 @@ struct qla_hw_data {
void *async_pd;
dma_addr_t async_pd_dma;
+#define ENABLE_EXTENDED_LOGIN BIT_7
+
+ /* Extended Logins */
+ void *exlogin_buf;
+ dma_addr_t exlogin_buf_dma;
+ int exlogin_size;
+
+#define ENABLE_EXCHANGE_OFFLD BIT_2
+
+ /* Exchange Offload */
+ void *exchoffld_buf;
+ dma_addr_t exchoffld_buf_dma;
+ int exchoffld_size;
+ int exchoffld_count;
+
void *swl;
/* These are used by mailbox operations. */
@@ -3279,8 +3324,14 @@ struct qla_hw_data {
#define RISC_START_ADDRESS_2100 0x1000
#define RISC_START_ADDRESS_2300 0x800
#define RISC_START_ADDRESS_2400 0x100000
- uint16_t fw_xcb_count;
- uint16_t fw_iocb_count;
+
+ uint16_t orig_fw_tgt_xcb_count;
+ uint16_t cur_fw_tgt_xcb_count;
+ uint16_t orig_fw_xcb_count;
+ uint16_t cur_fw_xcb_count;
+ uint16_t orig_fw_iocb_count;
+ uint16_t cur_fw_iocb_count;
+ uint16_t fw_max_fcf_count;
uint32_t fw_shared_ram_start;
uint32_t fw_shared_ram_end;
@@ -3323,6 +3374,9 @@ struct qla_hw_data {
uint32_t chain_offset;
struct dentry *dfs_dir;
struct dentry *dfs_fce;
+ struct dentry *dfs_tgt_counters;
+ struct dentry *dfs_fw_resource_cnt;
+
dma_addr_t fce_dma;
void *fce;
uint32_t fce_bufs;
@@ -3480,6 +3534,18 @@ struct qla_hw_data {
int allow_cna_fw_dump;
};
+struct qla_tgt_counters {
+ uint64_t qla_core_sbt_cmd;
+ uint64_t core_qla_que_buf;
+ uint64_t qla_core_ret_ctio;
+ uint64_t core_qla_snd_status;
+ uint64_t qla_core_ret_sta_ctio;
+ uint64_t core_qla_free_cmd;
+ uint64_t num_q_full_sent;
+ uint64_t num_alloc_iocb_failed;
+ uint64_t num_term_xchg_sent;
+};
+
/*
* Qlogic scsi host structure
*/
@@ -3595,6 +3661,10 @@ typedef struct scsi_qla_host {
atomic_t generation_tick;
/* Time when global fcport update has been scheduled */
int total_fcport_update_gen;
+ /* List of pending LOGOs, protected by tgt_mutex */
+ struct list_head logo_list;
+ /* List of pending PLOGI acks, protected by hw lock */
+ struct list_head plogi_ack_list;
uint32_t vp_abort_cnt;
@@ -3632,6 +3702,7 @@ typedef struct scsi_qla_host {
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
+ struct qla_tgt_counters tgt_counters;
} scsi_qla_host_t;
#define SET_VP_IDX 1
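
One consequence of the new counters in struct qla_hw_data: Q_FULL_THRESH_HOLD now keys off the firmware-reported cur_fw_xcb_count (filled in by the reworked qla2x00_get_resource_cnts() later in this patch) instead of the value cached at init time. A standalone sketch of the arithmetic, using the raised DEFAULT_OUTSTANDING_COMMANDS value as an example input (the numbers are illustrative only):

/* Worked example of the queue-full threshold computation. */
#include <stdio.h>

#define Q_FULL_THRESH_HOLD_PERCENT 90

int main(void)
{
	unsigned int cur_fw_xcb_count = 4096;	/* example, matches the new DEFAULT_OUTSTANDING_COMMANDS */
	unsigned int thresh = (cur_fw_xcb_count / 100) * Q_FULL_THRESH_HOLD_PERCENT;

	printf("queue-full threshold = %u\n", thresh);	/* (4096/100) * 90 = 3600 */
	return 0;
}
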
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 15cf074ff..cd8b96a4b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -13,6 +13,85 @@ static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;
static int
+qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+{
+ struct scsi_qla_host *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+
+ seq_puts(s, "FW Resource count\n\n");
+ seq_printf(s, "Original TGT exchg count[%d]\n",
+ ha->orig_fw_tgt_xcb_count);
+ seq_printf(s, "current TGT exchg count[%d]\n",
+ ha->cur_fw_tgt_xcb_count);
+ seq_printf(s, "original Initiator Exchange count[%d]\n",
+ ha->orig_fw_xcb_count);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n",
+ ha->cur_fw_xcb_count);
+ seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
+ seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
+ seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
+ seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
+
+ return 0;
+}
+
+static int
+qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
+{
+ struct scsi_qla_host *vha = inode->i_private;
+ return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
+}
+
+static const struct file_operations dfs_fw_resource_cnt_ops = {
+ .open = qla_dfs_fw_resource_cnt_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
+{
+ struct scsi_qla_host *vha = s->private;
+
+ seq_puts(s, "Target Counters\n");
+ seq_printf(s, "qla_core_sbt_cmd = %lld\n",
+ vha->tgt_counters.qla_core_sbt_cmd);
+ seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
+ vha->tgt_counters.qla_core_ret_sta_ctio);
+ seq_printf(s, "qla_core_ret_ctio = %lld\n",
+ vha->tgt_counters.qla_core_ret_ctio);
+ seq_printf(s, "core_qla_que_buf = %lld\n",
+ vha->tgt_counters.core_qla_que_buf);
+ seq_printf(s, "core_qla_snd_status = %lld\n",
+ vha->tgt_counters.core_qla_snd_status);
+ seq_printf(s, "core_qla_free_cmd = %lld\n",
+ vha->tgt_counters.core_qla_free_cmd);
+ seq_printf(s, "num alloc iocb failed = %lld\n",
+ vha->tgt_counters.num_alloc_iocb_failed);
+ seq_printf(s, "num term exchange sent = %lld\n",
+ vha->tgt_counters.num_term_xchg_sent);
+ seq_printf(s, "num Q full sent = %lld\n",
+ vha->tgt_counters.num_q_full_sent);
+
+ return 0;
+}
+
+static int
+qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
+{
+ struct scsi_qla_host *vha = inode->i_private;
+ return single_open(file, qla_dfs_tgt_counters_show, vha);
+}
+
+static const struct file_operations dfs_tgt_counters_ops = {
+ .open = qla_dfs_tgt_counters_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
scsi_qla_host_t *vha = s->private;
@@ -146,6 +225,22 @@ create_dir:
atomic_inc(&qla2x00_dfs_root_count);
create_nodes:
+ ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
+ S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
+ if (!ha->dfs_fw_resource_cnt) {
+ ql_log(ql_log_warn, vha, 0x00fd,
+ "Unable to create debugFS fw_resource_count node.\n");
+ goto out;
+ }
+
+ ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
+ ha->dfs_dir, vha, &dfs_tgt_counters_ops);
+ if (!ha->dfs_tgt_counters) {
+ ql_log(ql_log_warn, vha, 0xd301,
+ "Unable to create debugFS tgt_counters node.\n");
+ goto out;
+ }
+
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
@@ -161,6 +256,17 @@ int
qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+
+ if (ha->dfs_fw_resource_cnt) {
+ debugfs_remove(ha->dfs_fw_resource_cnt);
+ ha->dfs_fw_resource_cnt = NULL;
+ }
+
+ if (ha->dfs_tgt_counters) {
+ debugfs_remove(ha->dfs_tgt_counters);
+ ha->dfs_tgt_counters = NULL;
+ }
+
if (ha->dfs_fce) {
debugfs_remove(ha->dfs_fce);
ha->dfs_fce = NULL;
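
Both new nodes use the single_open()/seq_file pattern, so they emit plain line-oriented text and can be read like any regular file. A user-space sketch for dumping tgt_counters follows; the debugfs path is an assumption based on the per-host directory the driver already creates under /sys/kernel/debug, so adjust it to the local host instance:

/* Dump the new tgt_counters debugfs node; the path is an assumed example. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/qla2xxx/qla2xxx_0/tgt_counters";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one "name = value" counter per line */
	fclose(f);
	return 0;
}
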
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 7686bfe9a..0103e468e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,6 +44,8 @@ extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
+extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
+
extern void qla2x00_update_fcports(scsi_qla_host_t *);
extern int qla2x00_abort_isp(scsi_qla_host_t *);
@@ -117,6 +119,8 @@ extern int ql2xdontresethba;
extern uint64_t ql2xmaxlun;
extern int ql2xmdcapmask;
extern int ql2xmdenable;
+extern int ql2xexlogins;
+extern int ql2xexchoffld;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -135,6 +139,10 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
+extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *);
+extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *);
+extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *);
+extern void qla2x00_free_exchoffld_buffer(struct qla_hw_data *);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
@@ -323,8 +331,7 @@ extern int
qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *);
extern int
-qla2x00_get_resource_cnts(scsi_qla_host_t *, uint16_t *, uint16_t *,
- uint16_t *, uint16_t *, uint16_t *, uint16_t *);
+qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
@@ -766,4 +773,11 @@ extern int qla8044_abort_isp(scsi_qla_host_t *);
extern int qla8044_check_fw_alive(struct scsi_qla_host *);
extern void qlt_host_reset_handler(struct qla_hw_data *ha);
+extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *,
+ uint16_t *);
+extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr);
+extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t);
+extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *);
+
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e197c6f39..692a7570b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1766,10 +1766,10 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
(ql2xmultique_tag || ql2xmaxqueues > 1)))
req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
else {
- if (ha->fw_xcb_count <= ha->fw_iocb_count)
- req->num_outstanding_cmds = ha->fw_xcb_count;
+ if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
+ req->num_outstanding_cmds = ha->cur_fw_xcb_count;
else
- req->num_outstanding_cmds = ha->fw_iocb_count;
+ req->num_outstanding_cmds = ha->cur_fw_iocb_count;
}
req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
@@ -1843,9 +1843,23 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x00ca,
"Starting firmware.\n");
+ if (ql2xexlogins)
+ ha->flags.exlogins_enabled = 1;
+
+ if (ql2xexchoffld)
+ ha->flags.exchoffld_enabled = 1;
+
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
+ rval = qla2x00_set_exlogins_buffer(vha);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
+ rval = qla2x00_set_exchoffld_buffer(vha);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
if (IS_P3P_TYPE(ha))
@@ -1864,9 +1878,7 @@ enable_82xx_npiv:
ha->max_npiv_vports =
MIN_MULTI_ID_FABRIC - 1;
}
- qla2x00_get_resource_cnts(vha, NULL,
- &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
- &ha->max_npiv_vports, NULL);
+ qla2x00_get_resource_cnts(vha);
/*
* Allocate the array of outstanding commands
@@ -2248,7 +2260,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (IS_FWI2_CAPABLE(ha)) {
mid_init_cb->options = cpu_to_le16(BIT_1);
mid_init_cb->init_cb.execution_throttle =
- cpu_to_le16(ha->fw_xcb_count);
+ cpu_to_le16(ha->cur_fw_xcb_count);
/* D-Port Status */
if (IS_DPORT_CAPABLE(ha))
mid_init_cb->init_cb.firmware_options_1 |=
@@ -3053,6 +3065,26 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_READY);
ql_dbg(ql_dbg_disc, vha, 0x2069,
"LOOP READY.\n");
+
+ /*
+ * Process any ATIO queue entries that came in
+ * while we weren't online.
+ */
+ if (qla_tgt_mode_enabled(vha)) {
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+ spin_lock_irqsave(&ha->tgt.atio_lock,
+ flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(
+ &ha->tgt.atio_lock, flags);
+ } else {
+ spin_lock_irqsave(&ha->hardware_lock,
+ flags);
+ qlt_24xx_process_atio_queue(vha, 1);
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ }
+ }
}
}
@@ -4907,7 +4939,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
- unsigned long flags;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
@@ -4929,17 +4960,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
- vha->flags.online = 1;
-
- /*
- * Process any ATIO queue entries that came in
- * while we weren't online.
- */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- if (qla_tgt_mode_enabled(vha))
- qlt_24xx_process_atio_queue(vha);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index fee9eb7c8..a6b7f1588 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -258,6 +258,8 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
(sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
+ if (sp->type == SRB_ELS_DCMD)
+ init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
}
static inline int
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c49df34e9..b41265a75 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1868,6 +1868,7 @@ skip_cmd_array:
}
queuing_error:
+ vha->tgt_counters.num_alloc_iocb_failed++;
return pkt;
}
@@ -2010,6 +2011,190 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
}
static void
+qla2x00_els_dcmd_sp_free(void *ptr, void *data)
+{
+ struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+ kfree(sp->fcport);
+
+ if (elsio->u.els_logo.els_logo_pyld)
+ dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ elsio->u.els_logo.els_logo_pyld,
+ elsio->u.els_logo.els_logo_pyld_dma);
+
+ del_timer(&elsio->timer);
+ qla2x00_rel_sp(vha, sp);
+}
+
+static void
+qla2x00_els_dcmd_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ ql_dbg(ql_dbg_io, vha, 0x3069,
+ "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
+ sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+
+ /* Abort the exchange */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ ql_dbg(ql_dbg_io, vha, 0x3070,
+ "mbx abort_command failed.\n");
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0x3071,
+ "mbx abort_command success.\n");
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ complete(&lio->u.els_logo.comp);
+}
+
+static void
+qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ fc_port_t *fcport = sp->fcport;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = fcport->vha;
+
+ ql_dbg(ql_dbg_io, vha, 0x3072,
+ "%s hdl=%x, portid=%02x%02x%02x done\n",
+ sp->name, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ complete(&lio->u.els_logo.comp);
+}
+
+int
+qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+ port_id_t remote_did)
+{
+ srb_t *sp;
+ fc_port_t *fcport = NULL;
+ struct srb_iocb *elsio = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ struct els_logo_payload logo_pyld;
+ int rval = QLA_SUCCESS;
+
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ kfree(fcport);
+ ql_log(ql_log_info, vha, 0x70e6,
+ "SRB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ elsio = &sp->u.iocb_cmd;
+ fcport->loop_id = 0xFFFF;
+ fcport->d_id.b.domain = remote_did.b.domain;
+ fcport->d_id.b.area = remote_did.b.area;
+ fcport->d_id.b.al_pa = remote_did.b.al_pa;
+
+ ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ sp->type = SRB_ELS_DCMD;
+ sp->name = "ELS_DCMD";
+ sp->fcport = fcport;
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
+ sp->done = qla2x00_els_dcmd_sp_done;
+ sp->free = qla2x00_els_dcmd_sp_free;
+
+ elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
+ DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
+ GFP_KERNEL);
+
+ if (!elsio->u.els_logo.els_logo_pyld) {
+ sp->free(vha, sp);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
+
+ elsio->u.els_logo.els_cmd = els_opcode;
+ logo_pyld.opcode = els_opcode;
+ logo_pyld.s_id[0] = vha->d_id.b.al_pa;
+ logo_pyld.s_id[1] = vha->d_id.b.area;
+ logo_pyld.s_id[2] = vha->d_id.b.domain;
+ host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
+ memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
+
+ memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
+ sizeof(struct els_logo_payload));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ sp->free(vha, sp);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_io, vha, 0x3074,
+ "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
+ sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ wait_for_completion(&elsio->u.els_logo.comp);
+
+ sp->free(vha, sp);
+ return rval;
+}
+
+static void
+qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
+{
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+ els_iocb->entry_type = ELS_IOCB_TYPE;
+ els_iocb->entry_count = 1;
+ els_iocb->sys_define = 0;
+ els_iocb->entry_status = 0;
+ els_iocb->handle = sp->handle;
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ els_iocb->tx_dsd_count = 1;
+ els_iocb->vp_index = vha->vp_idx;
+ els_iocb->sof_type = EST_SOFI3;
+ els_iocb->rx_dsd_count = 0;
+ els_iocb->opcode = elsio->u.els_logo.els_cmd;
+
+ els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->control_flags = 0;
+
+ els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
+ els_iocb->tx_address[0] =
+ cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
+ els_iocb->tx_address[1] =
+ cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+ els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
+
+ els_iocb->rx_byte_count = 0;
+ els_iocb->rx_address[0] = 0;
+ els_iocb->rx_address[1] = 0;
+ els_iocb->rx_len = 0;
+
+ sp->fcport->vha->qla_stats.control_requests++;
+}
+
+static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
@@ -2623,6 +2808,9 @@ qla2x00_start_sp(srb_t *sp)
qlafx00_abort_iocb(sp, pkt) :
qla24xx_abort_iocb(sp, pkt);
break;
+ case SRB_ELS_DCMD:
+ qla24xx_els_logo_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0e59731f9..4af95479a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -18,6 +18,10 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
+static void qla_irq_affinity_notify(struct irq_affinity_notify *,
+ const cpumask_t *);
+static void qla_irq_affinity_release(struct kref *);
+
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -1418,6 +1422,12 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
case SRB_CT_CMD:
type = "ct pass-through";
break;
+ case SRB_ELS_DCMD:
+ type = "Driver ELS logo";
+ ql_dbg(ql_dbg_user, vha, 0x5047,
+ "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
+ sp->done(vha, sp, 0);
+ return;
default:
ql_dbg(ql_dbg_user, vha, 0x503e,
"Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
@@ -2542,6 +2552,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;
+ if (rsp->msix->cpuid != smp_processor_id()) {
+ /* if kernel does not notify qla of IRQ's CPU change,
+ * then set it here.
+ */
+ rsp->msix->cpuid = smp_processor_id();
+ ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
+ }
+
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -2587,8 +2605,14 @@ process_err:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
- /* ensure that the ATIO queue is empty */
- qlt_24xx_process_atio_queue(vha);
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ /* ensure that the ATIO queue is empty */
+ qlt_handle_abts_recv(vha, (response_t *)pkt);
+ break;
+ } else {
+ /* drop through */
+ qlt_24xx_process_atio_queue(vha, 1);
+ }
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case NOTIFY_ACK_TYPE:
@@ -2755,13 +2779,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
- case INTR_ATIO_QUE_UPDATE:
- qlt_24xx_process_atio_queue(vha);
+ case INTR_ATIO_QUE_UPDATE:{
+ unsigned long flags2;
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+ qlt_24xx_process_atio_queue(vha, 1);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
break;
- case INTR_ATIO_RSP_QUE_UPDATE:
- qlt_24xx_process_atio_queue(vha);
+ }
+ case INTR_ATIO_RSP_QUE_UPDATE: {
+ unsigned long flags2;
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+ qlt_24xx_process_atio_queue(vha, 1);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+
qla24xx_process_response_queue(vha, rsp);
break;
+ }
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2920,13 +2953,22 @@ qla24xx_msix_default(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
- case INTR_ATIO_QUE_UPDATE:
- qlt_24xx_process_atio_queue(vha);
+ case INTR_ATIO_QUE_UPDATE:{
+ unsigned long flags2;
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+ qlt_24xx_process_atio_queue(vha, 1);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
break;
- case INTR_ATIO_RSP_QUE_UPDATE:
- qlt_24xx_process_atio_queue(vha);
+ }
+ case INTR_ATIO_RSP_QUE_UPDATE: {
+ unsigned long flags2;
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+ qlt_24xx_process_atio_queue(vha, 1);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+
qla24xx_process_response_queue(vha, rsp);
break;
+ }
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2973,8 +3015,11 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- if (qentry->have_irq)
+ if (qentry->have_irq) {
+ /* un-register irq cpu affinity notification */
+ irq_set_affinity_notifier(qentry->vector, NULL);
free_irq(qentry->vector, qentry->rsp);
+ }
}
pci_disable_msix(ha->pdev);
kfree(ha->msix_entries);
@@ -3037,6 +3082,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->entry = entries[i].entry;
qentry->have_irq = 0;
qentry->rsp = NULL;
+ qentry->irq_notify.notify = qla_irq_affinity_notify;
+ qentry->irq_notify.release = qla_irq_affinity_release;
+ qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
@@ -3055,6 +3103,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
+
+ /* Register for CPU affinity notification. */
+ irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
+
+ /* Schedule work (ie. trigger a notification) to read cpu
+ * mask for this specific irq.
+ * kref_get is required because
+ * irq_affinity_notify() will do
+ * kref_put().
+ */
+ kref_get(&qentry->irq_notify.kref);
+ schedule_work(&qentry->irq_notify.work);
}
/*
@@ -3234,3 +3294,47 @@ int qla25xx_request_irq(struct rsp_que *rsp)
msix->rsp = rsp;
return ret;
}
+
+
+/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
+static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct qla_msix_entry *e =
+ container_of(notify, struct qla_msix_entry, irq_notify);
+ struct qla_hw_data *ha;
+ struct scsi_qla_host *base_vha;
+
+ /* user is recommended to set mask to just 1 cpu */
+ e->cpuid = cpumask_first(mask);
+
+ ha = e->rsp->hw;
+ base_vha = pci_get_drvdata(ha->pdev);
+
+ ql_dbg(ql_dbg_init, base_vha, 0xffff,
+ "%s: host %ld : vector %d cpu %d \n", __func__,
+ base_vha->host_no, e->vector, e->cpuid);
+
+ if (e->have_irq) {
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
+ (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
+ ha->tgt.rspq_vector_cpuid = e->cpuid;
+ ql_dbg(ql_dbg_init, base_vha, 0xffff,
+ "%s: host%ld: rspq vector %d cpu %d runtime change\n",
+ __func__, base_vha->host_no, e->vector, e->cpuid);
+ }
+ }
+}
+
+static void qla_irq_affinity_release(struct kref *ref)
+{
+ struct irq_affinity_notify *notify =
+ container_of(ref, struct irq_affinity_notify, kref);
+ struct qla_msix_entry *e =
+ container_of(notify, struct qla_msix_entry, irq_notify);
+ struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+
+ ql_dbg(ql_dbg_init, base_vha, 0xffff,
+ "%s: host%ld: vector %d cpu %d \n", __func__,
+ base_vha->host_no, e->vector, e->cpuid);
+}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb11e04be..87e675830 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -489,6 +489,13 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
EXTENDED_BB_CREDITS);
} else
mcp->mb[4] = 0;
+
+ if (ha->flags.exlogins_enabled)
+ mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
+
+ if (ha->flags.exchoffld_enabled)
+ mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
+
mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
mcp->in_mb |= MBX_1;
} else {
@@ -521,6 +528,226 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
}
/*
+ * qla_get_exlogin_status
+ * Get extended login status
+ * uses the memory offload control/status Mailbox
+ *
+ * Input:
+ * ha: adapter state pointer.
+ * fwopt: firmware options
+ *
+ * Returns:
+ * qla2x00 local function status
+ *
+ * Context:
+ * Kernel context.
+ */
+#define FETCH_XLOGINS_STAT 0x8
+int
+qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
+ uint16_t *ex_logins_cnt)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
+ "Entered %s\n", __func__);
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
+ mcp->mb[1] = FETCH_XLOGINS_STAT;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_10|MBX_4|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
+ } else {
+ *buf_sz = mcp->mb[4];
+ *ex_logins_cnt = mcp->mb[10];
+
+ ql_log(ql_log_info, vha, 0x1190,
+ "buffer size 0x%x, exchange login count=%d\n",
+ mcp->mb[4], mcp->mb[10]);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla_set_exlogin_mem_cfg
+ * set extended login memory configuration
+ * Mbx needs to be issued before init_cb is set
+ *
+ * Input:
+ * ha: adapter state pointer.
+ * buffer: buffer pointer
+ * phys_addr: physical address of buffer
+ * size: size of buffer
+ * TARGET_QUEUE_LOCK must be released
+ * ADAPTER_STATE_LOCK must be released
+ *
+ * Returns:
+ * qla2x00 local function status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+#define CONFIG_XLOGINS_MEM 0x3
+int
+qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+ int configured_count;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
+ "Entered %s.\n", __func__);
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
+ mcp->mb[1] = CONFIG_XLOGINS_MEM;
+ mcp->mb[2] = MSW(phys_addr);
+ mcp->mb[3] = LSW(phys_addr);
+ mcp->mb[6] = MSW(MSD(phys_addr));
+ mcp->mb[7] = LSW(MSD(phys_addr));
+ mcp->mb[8] = MSW(ha->exlogin_size);
+ mcp->mb[9] = LSW(ha->exlogin_size);
+ mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_11|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
+ } else {
+ configured_count = mcp->mb[11];
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla_get_exchoffld_status
+ * Get exchange offload status
+ * uses the memory offload control/status Mailbox
+ *
+ * Input:
+ * ha: adapter state pointer.
+ * fwopt: firmware options
+ *
+ * Returns:
+ * qla2x00 local function status
+ *
+ * Context:
+ * Kernel context.
+ */
+#define FETCH_XCHOFFLD_STAT 0x2
+int
+qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
+ uint16_t *ex_logins_cnt)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
+ "Entered %s\n", __func__);
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
+ mcp->mb[1] = FETCH_XCHOFFLD_STAT;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_10|MBX_4|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
+ } else {
+ *buf_sz = mcp->mb[4];
+ *ex_logins_cnt = mcp->mb[10];
+
+ ql_log(ql_log_info, vha, 0x118e,
+ "buffer size 0x%x, exchange offload count=%d\n",
+ mcp->mb[4], mcp->mb[10]);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla_set_exchoffld_mem_cfg
+ * Set exchange offload memory configuration
+ * Mbx needs to be issued before init_cb is set
+ *
+ * Input:
+ * ha: adapter state pointer.
+ * buffer: buffer pointer
+ * phys_addr: physical address of buffer
+ * size: size of buffer
+ * TARGET_QUEUE_LOCK must be released
+ * ADAPTER_STATE_LOCK must be released
+ *
+ * Returns:
+ * qla2x00 local function status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+#define CONFIG_XCHOFFLD_MEM 0x3
+int
+qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
+ "Entered %s.\n", __func__);
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
+ mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
+ mcp->mb[2] = MSW(phys_addr);
+ mcp->mb[3] = LSW(phys_addr);
+ mcp->mb[6] = MSW(MSD(phys_addr));
+ mcp->mb[7] = LSW(MSD(phys_addr));
+ mcp->mb[8] = MSW(ha->exchoffld_size);
+ mcp->mb[9] = LSW(ha->exchoffld_size);
+ mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_11|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
* qla2x00_get_fw_version
* Get firmware version.
*
@@ -594,6 +821,16 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
"%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[17], mcp->mb[16]);
+
+ if (ha->fw_attributes_h & 0x4)
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
+ "%s: Firmware supports Extended Login 0x%x\n",
+ __func__, ha->fw_attributes_h);
+
+ if (ha->fw_attributes_h & 0x8)
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
+ "%s: Firmware supports Exchange Offload 0x%x\n",
+ __func__, ha->fw_attributes_h);
}
if (IS_QLA27XX(ha)) {
@@ -2383,10 +2620,9 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
* Kernel context.
*/
int
-qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
- uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
- uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
+qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
@@ -2414,19 +2650,16 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
mcp->mb[11], mcp->mb[12]);
- if (cur_xchg_cnt)
- *cur_xchg_cnt = mcp->mb[3];
- if (orig_xchg_cnt)
- *orig_xchg_cnt = mcp->mb[6];
- if (cur_iocb_cnt)
- *cur_iocb_cnt = mcp->mb[7];
- if (orig_iocb_cnt)
- *orig_iocb_cnt = mcp->mb[10];
- if (vha->hw->flags.npiv_supported && max_npiv_vports)
- *max_npiv_vports = mcp->mb[11];
- if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
- IS_QLA27XX(vha->hw)) && max_fcfs)
- *max_fcfs = mcp->mb[12];
+ ha->orig_fw_tgt_xcb_count = mcp->mb[1];
+ ha->cur_fw_tgt_xcb_count = mcp->mb[2];
+ ha->cur_fw_xcb_count = mcp->mb[3];
+ ha->orig_fw_xcb_count = mcp->mb[6];
+ ha->cur_fw_iocb_count = mcp->mb[7];
+ ha->orig_fw_iocb_count = mcp->mb[10];
+ if (ha->flags.npiv_supported)
+ ha->max_npiv_vports = mcp->mb[11];
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ ha->fw_max_fcf_count = mcp->mb[12];
}
return (rval);
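
The two new *_mem_cfg commands pass the buffer's DMA address split across mailbox registers 2/3/6/7 and its length across registers 8/9. A standalone sketch of that packing (plain user-space C; the MSW/LSW/MSD helpers below mirror the driver's macros of the same names, and the address and size are example values only):

/* Illustrative packing of a 64-bit DMA address into 16-bit mailbox registers. */
#include <stdint.h>
#include <stdio.h>

#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
#define MSD(x)	((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
	uint64_t phys_addr = 0x0000001234abc000ULL;	/* example DMA address */
	uint32_t size = 0x00020000;			/* example buffer size */
	uint16_t mb[10] = { 0 };

	mb[2] = MSW(phys_addr);		/* address bits 31..16 */
	mb[3] = LSW(phys_addr);		/* address bits 15..0  */
	mb[6] = MSW(MSD(phys_addr));	/* address bits 63..48 */
	mb[7] = LSW(MSD(phys_addr));	/* address bits 47..32 */
	mb[8] = MSW(size);
	mb[9] = LSW(size);

	printf("mb2=%04x mb3=%04x mb6=%04x mb7=%04x mb8=%04x mb9=%04x\n",
	       mb[2], mb[3], mb[6], mb[7], mb[8], mb[9]);
	return 0;
}
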
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 38dc318c6..39f084d2b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -221,6 +221,18 @@ MODULE_PARM_DESC(ql2xmdenable,
"0 - MiniDump disabled. "
"1 (Default) - MiniDump enabled.");
+int ql2xexlogins = 0;
+module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xexlogins,
+ "Number of extended Logins. "
+ "0 (Default)- Disabled.");
+
+int ql2xexchoffld = 0;
+module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xexchoffld,
+ "Number of exchanges to offload. "
+ "0 (Default)- Disabled.");
+
/*
* SCSI host template entry points
*/
@@ -2330,6 +2342,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->tgt.enable_class_2 = ql2xenableclass2;
INIT_LIST_HEAD(&ha->tgt.q_full_list);
spin_lock_init(&ha->tgt.q_full_lock);
+ spin_lock_init(&ha->tgt.sess_lock);
+ spin_lock_init(&ha->tgt.atio_lock);
+
/* Clear our data area */
ha->bars = bars;
@@ -2474,7 +2489,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_83XX;
- rsp_length = RESPONSE_ENTRY_CNT_2300;
+ rsp_length = RESPONSE_ENTRY_CNT_83XX;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@@ -2504,8 +2519,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- req_length = REQUEST_ENTRY_CNT_24XX;
- rsp_length = RESPONSE_ENTRY_CNT_2300;
+ req_length = REQUEST_ENTRY_CNT_83XX;
+ rsp_length = RESPONSE_ENTRY_CNT_83XX;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@@ -3134,6 +3149,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
base_vha->flags.online = 0;
+ /* free DMA memory */
+ if (ha->exlogin_buf)
+ qla2x00_free_exlogin_buffer(ha);
+
+ /* free DMA memory */
+ if (ha->exchoffld_buf)
+ qla2x00_free_exchoffld_buffer(ha);
+
qla2x00_destroy_deferred_work(ha);
qlt_remove_target(ha, base_vha);
@@ -3593,6 +3616,140 @@ fail:
return -ENOMEM;
}
+int
+qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint16_t size, max_cnt, temp;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Return if we don't need to allocate any extended logins */
+ if (!ql2xexlogins)
+ return QLA_SUCCESS;
+
+ ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
+ max_cnt = 0;
+ rval = qla_get_exlogin_status(vha, &size, &max_cnt);
+ if (rval != QLA_SUCCESS) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
+ "Failed to get exlogin status.\n");
+ return rval;
+ }
+
+ temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
+ ha->exlogin_size = (size * temp);
+ ql_log(ql_log_info, vha, 0xd024,
+ "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
+ max_cnt, size, temp);
+
+ ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n",
+ ha->exlogin_size);
+
+ /* Get consistent memory for extended logins */
+ ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
+ ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
+ if (!ha->exlogin_buf) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
+ "Failed to allocate memory for exlogin_buf_dma.\n");
+ return -ENOMEM;
+ }
+
+ /* Now configure the dma buffer */
+ rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0x00cf,
+ "Setup extended login buffer ****FAILED****.\n");
+ qla2x00_free_exlogin_buffer(ha);
+ }
+
+ return rval;
+}
+
+/*
+* qla2x00_free_exlogin_buffer
+*
+* Input:
+* ha = adapter block pointer
+*/
+void
+qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
+{
+ if (ha->exlogin_buf) {
+ dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
+ ha->exlogin_buf, ha->exlogin_buf_dma);
+ ha->exlogin_buf = NULL;
+ ha->exlogin_size = 0;
+ }
+}
+
+int
+qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint16_t size, max_cnt, temp;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Return if we don't need to allocate any exchange offload buffers */
+ if (!ql2xexchoffld)
+ return QLA_SUCCESS;
+
+ ql_log(ql_log_info, vha, 0xd014,
+ "Exchange offload count: %d.\n", ql2xexlogins);
+
+ max_cnt = 0;
+ rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
+ if (rval != QLA_SUCCESS) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
+ "Failed to get exlogin status.\n");
+ return rval;
+ }
+
+ temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld;
+ ha->exchoffld_size = (size * temp);
+ ql_log(ql_log_info, vha, 0xd016,
+ "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
+ max_cnt, size, temp);
+
+ ql_log(ql_log_info, vha, 0xd017,
+ "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size);
+
+ /* Get consistent memory for extended logins */
+ ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
+ ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
+ if (!ha->exchoffld_buf) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+ "Failed to allocate memory for exchoffld_buf_dma.\n");
+ return -ENOMEM;
+ }
+
+ /* Now configure the dma buffer */
+ rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma);
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0xd02e,
+ "Setup exchange offload buffer ****FAILED****.\n");
+ qla2x00_free_exchoffld_buffer(ha);
+ }
+
+ return rval;
+}
+
+/*
+* qla2x00_free_exchoffld_buffer
+*
+* Input:
+* ha = adapter block pointer
+*/
+void
+qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
+{
+ if (ha->exchoffld_buf) {
+ dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
+ ha->exchoffld_buf, ha->exchoffld_buf_dma);
+ ha->exchoffld_buf = NULL;
+ ha->exchoffld_size = 0;
+ }
+}
+
/*
* qla2x00_free_fw_dump
* Frees fw dump stuff.
@@ -3772,6 +3929,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->list);
INIT_LIST_HEAD(&vha->qla_cmd_list);
INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
+ INIT_LIST_HEAD(&vha->logo_list);
+ INIT_LIST_HEAD(&vha->plogi_ack_list);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
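
qla2x00_set_exlogins_buffer() and qla2x00_set_exchoffld_buffer() size their DMA buffers the same way: query the firmware for a per-entry size and a maximum count, clamp the module parameter to that maximum, and multiply. A standalone sketch of the arithmetic (the function name and example values are illustrative only):

/* Illustrative sizing of the extended-login / exchange-offload buffer. */
#include <stdint.h>
#include <stdio.h>

static uint32_t offload_buf_size(uint16_t per_entry_size, uint16_t fw_max_cnt,
				 uint16_t requested_cnt)
{
	uint16_t cnt = (requested_cnt > fw_max_cnt) ? fw_max_cnt : requested_cnt;

	return (uint32_t)per_entry_size * cnt;
}

int main(void)
{
	/* e.g. firmware reports 256-byte port-database entries, max 2048,
	 * and ql2xexlogins was set to 1024 */
	printf("exlogin buffer = 0x%x bytes\n",
	       offload_buf_size(256, 2048, 1024));	/* 256 * 1024 = 0x40000 */
	return 0;
}
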
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 75514a15b..ee967becd 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -100,12 +100,12 @@ enum fcp_resp_rsp_codes {
*/
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
- struct atio_from_isp *pkt);
+ struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
- *cmd, struct atio_from_isp *atio, int ha_locked);
+ *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
@@ -118,10 +118,13 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *ntfy,
uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *imm, int ha_locked);
/*
* Global Variables
*/
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
@@ -226,8 +229,8 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
-static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint8_t ha_locked)
{
ql_dbg(ql_dbg_tgt, vha, 0xe072,
"%s: qla_target(%d): type %x ox_id %04x\n",
@@ -248,7 +251,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
atio->u.isp24.fcp_hdr.d_id[2]);
break;
}
- qlt_24xx_atio_pkt(host, atio);
+ qlt_24xx_atio_pkt(host, atio, ha_locked);
break;
}
@@ -271,7 +274,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
break;
}
}
- qlt_24xx_atio_pkt(host, atio);
+ qlt_24xx_atio_pkt(host, atio, ha_locked);
break;
}
@@ -282,7 +285,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
break;
}
- return;
+ return false;
}
void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
@@ -389,6 +392,131 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
}
+/*
+ * All qlt_plogi_ack_t operations are protected by hardware_lock
+ */
+
+/*
+ * This is a zero-base ref-counting solution, since hardware_lock
+ * guarantees that ref_count is not modified concurrently.
+ * Upon successful return content of iocb is undefined
+ */
+static qlt_plogi_ack_t *
+qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
+ struct imm_ntfy_from_isp *iocb)
+{
+ qlt_plogi_ack_t *pla;
+
+ list_for_each_entry(pla, &vha->plogi_ack_list, list) {
+ if (pla->id.b24 == id->b24) {
+ qlt_send_term_imm_notif(vha, &pla->iocb, 1);
+ pla->iocb = *iocb;
+ return pla;
+ }
+ }
+
+ pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
+ if (!pla) {
+ ql_dbg(ql_dbg_async, vha, 0x5088,
+ "qla_target(%d): Allocation of plogi_ack failed\n",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ pla->iocb = *iocb;
+ pla->id = *id;
+ list_add_tail(&pla->list, &vha->plogi_ack_list);
+
+ return pla;
+}
+
+static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
+{
+ BUG_ON(!pla->ref_count);
+ pla->ref_count--;
+
+ if (pla->ref_count)
+ return;
+
+ ql_dbg(ql_dbg_async, vha, 0x5089,
+ "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
+ " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
+ pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
+ pla->iocb.u.isp24.port_id[0],
+ le16_to_cpu(pla->iocb.u.isp24.nport_handle),
+ pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
+ qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);
+
+ list_del(&pla->list);
+ kmem_cache_free(qla_tgt_plogi_cachep, pla);
+}
+
+static void
+qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
+ struct qla_tgt_sess *sess, qlt_plogi_link_t link)
+{
+ /* Inc ref_count first because link might already be pointing at pla */
+ pla->ref_count++;
+
+ if (sess->plogi_link[link])
+ qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
+ "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
+ " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
+ pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
+ pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
+ pla->ref_count);
+
+ sess->plogi_link[link] = pla;
+}
+
+typedef struct {
+ /* These fields must be initialized by the caller */
+ port_id_t id;
+ /*
+ * number of cmds dropped while we were waiting for
+ * initiator to ack LOGO initialize to 1 if LOGO is
+ * triggered by a command, otherwise, to 0
+ */
+ int cmd_count;
+
+ /* These fields are used by callee */
+ struct list_head list;
+} qlt_port_logo_t;
+
+static void
+qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
+{
+ qlt_port_logo_t *tmp;
+ int res;
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+
+ list_for_each_entry(tmp, &vha->logo_list, list) {
+ if (tmp->id.b24 == logo->id.b24) {
+ tmp->cmd_count += logo->cmd_count;
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ return;
+ }
+ }
+
+ list_add_tail(&logo->list, &vha->logo_list);
+
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ list_del(&logo->list);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
+ "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
+ logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
+ logo->cmd_count, res);
+}
+
static void qlt_free_session_done(struct work_struct *work)
{
struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
@@ -402,14 +530,21 @@ static void qlt_free_session_done(struct work_struct *work)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
- " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
+ " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
sess->logout_on_delete, sess->keep_nport_handle,
- sess->plogi_ack_needed);
+ sess->send_els_logo);
BUG_ON(!tgt);
+ if (sess->send_els_logo) {
+ qlt_port_logo_t logo;
+ logo.id = sess->s_id;
+ logo.cmd_count = 0;
+ qlt_send_first_logo(vha, &logo);
+ }
+
if (sess->logout_on_delete) {
int rc;
@@ -455,9 +590,34 @@ static void qlt_free_session_done(struct work_struct *work)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (sess->plogi_ack_needed)
- qlt_send_notify_ack(vha, &sess->tm_iocb,
- 0, 0, 0, 0, 0, 0);
+ {
+ qlt_plogi_ack_t *own =
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
+ qlt_plogi_ack_t *con =
+ sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
+
+ if (con) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
+ "se_sess %p / sess %p port %8phC is gone,"
+ " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
+ sess->se_sess, sess, sess->port_name,
+ own ? "releasing own PLOGI" :
+ "no own PLOGI pending",
+ own ? own->ref_count : -1,
+ con->iocb.u.isp24.port_name, con->ref_count);
+ qlt_plogi_ack_unref(vha, con);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
+ "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
+ sess->se_sess, sess, sess->port_name,
+ own ? "releasing own PLOGI" :
+ "no own PLOGI pending",
+ own ? own->ref_count : -1);
+ }
+
+ if (own)
+ qlt_plogi_ack_unref(vha, own);
+ }
list_del(&sess->sess_list_entry);
@@ -476,7 +636,7 @@ static void qlt_free_session_done(struct work_struct *work)
wake_up_all(&tgt->waitQ);
}
-/* ha->hardware_lock supposed to be held on entry */
+/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
struct scsi_qla_host *vha = sess->vha;
@@ -492,7 +652,7 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
}
EXPORT_SYMBOL(qlt_unreg_sess);
-/* ha->hardware_lock supposed to be held on entry */
+
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
struct qla_hw_data *ha = vha->hw;
@@ -502,12 +662,15 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
int res = 0;
struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ unsigned long flags;
loop_id = le16_to_cpu(n->u.isp24.nport_handle);
if (loop_id == 0xFFFF) {
/* Global event */
atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
#if 0 /* FIXME: do we need to choose a session here? */
if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
@@ -534,7 +697,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
sess = NULL;
#endif
} else {
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
ql_dbg(ql_dbg_tgt, vha, 0xe000,
@@ -556,7 +721,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
iocb, QLA24XX_MGMT_SEND_NACK);
}
-/* ha->hardware_lock supposed to be held on entry */
+/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
bool immediate)
{
@@ -600,7 +765,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
sess->expires - jiffies);
}
-/* ha->hardware_lock supposed to be held on entry */
+/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
struct qla_tgt_sess *sess;
@@ -636,12 +801,12 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
"qla_target(%d): get_id_list() failed: %x\n",
vha->vp_idx, rc);
- res = -1;
+ res = -EBUSY;
goto out_free_id_list;
}
id_iter = (char *)gid_list;
- res = -1;
+ res = -ENOENT;
for (i = 0; i < entries; i++) {
struct gid_list_info *gid = (struct gid_list_info *)id_iter;
if ((gid->al_pa == s_id[2]) &&
@@ -660,7 +825,7 @@ out_free_id_list:
return res;
}
-/* ha->hardware_lock supposed to be held on entry */
+/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
@@ -678,7 +843,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
struct qla_tgt_sess *sess;
unsigned long flags, elapsed;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
while (!list_empty(&tgt->del_sess_list)) {
sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
del_list_entry);
@@ -699,7 +864,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
break;
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
/*
@@ -717,7 +882,7 @@ static struct qla_tgt_sess *qlt_create_sess(
unsigned char be_sid[3];
/* Check to avoid double sessions */
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
sess_list_entry) {
if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
@@ -732,7 +897,7 @@ static struct qla_tgt_sess *qlt_create_sess(
/* Cannot undelete at this point */
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- spin_unlock_irqrestore(&ha->hardware_lock,
+ spin_unlock_irqrestore(&ha->tgt.sess_lock,
flags);
return NULL;
}
@@ -749,12 +914,12 @@ static struct qla_tgt_sess *qlt_create_sess(
qlt_do_generation_tick(vha, &sess->generation);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return sess;
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess) {
@@ -799,7 +964,7 @@ static struct qla_tgt_sess *qlt_create_sess(
}
/*
* Take an extra reference to ->sess_kref here to handle qla_tgt_sess
- * access across ->hardware_lock reaquire.
+ * access across ->tgt.sess_lock reacquire.
*/
kref_get(&sess->se_sess->sess_kref);
@@ -807,11 +972,11 @@ static struct qla_tgt_sess *qlt_create_sess(
BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
vha->vha_tgt.qla_tgt->sess_count++;
qlt_do_generation_tick(vha, &sess->generation);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
"qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
@@ -842,23 +1007,23 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
if (qla_ini_mode_enabled(vha))
return;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (tgt->tgt_stop) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
}
sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
if (!sess) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_create_sess(vha, fcport, false);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
} else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
/* Point of no return */
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
} else {
kref_get(&sess->se_sess->sess_kref);
@@ -887,7 +1052,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
sess->local = 0;
}
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
/*
@@ -899,6 +1064,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
+ unsigned long flags;
if (!vha->hw->tgt.tgt_ops)
return;
@@ -906,15 +1072,19 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
if (!tgt)
return;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return;
}
sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
if (!sess) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return;
}
if (max_gen - sess->generation < 0) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
"Ignoring stale deletion request for se_sess %p / sess %p"
" for port %8phC, req_gen %d, sess_gen %d\n",
@@ -927,6 +1097,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
sess->local = 1;
qlt_schedule_sess_for_deletion(sess, false);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -984,10 +1155,10 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
* Lock is needed, because we still can get an incoming packet.
*/
mutex_lock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt->tgt_stop = 1;
qlt_clear_tgt_db(tgt);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
mutex_unlock(&qla_tgt_mutex);
@@ -1040,7 +1211,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
mutex_lock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
- while (tgt->irq_cmd_count != 0) {
+ while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
udelay(2);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1309,7 +1480,7 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
if (tag == cmd->atio.u.isp24.exchange_addr) {
- cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->aborted = 1;
spin_unlock(&vha->cmd_list_lock);
return 1;
}
@@ -1351,7 +1522,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
cmd_lun = scsilun_to_int(
(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
if (cmd_key == key && cmd_lun == lun)
- cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->aborted = 1;
}
spin_unlock(&vha->cmd_list_lock);
}
@@ -1435,6 +1606,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
uint32_t tag = abts->exchange_addr_to_abort;
uint8_t s_id[3];
int rc;
+ unsigned long flags;
if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
@@ -1462,6 +1634,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
s_id[1] = abts->fcp_hdr_le.s_id[1];
s_id[2] = abts->fcp_hdr_le.s_id[0];
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
if (!sess) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
@@ -1469,12 +1642,17 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
vha->vp_idx);
rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
if (rc != 0) {
qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
false);
}
return;
}
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
@@ -1560,15 +1738,15 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
+ if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
/*
- * Either a chip reset is active or this request was from
+ * Either the port is not online or this request was from
* previous life, just abort the processing.
*/
ql_dbg(ql_dbg_async, vha, 0xe100,
- "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
- qla2x00_reset_active(vha), mcmd->reset_count,
- ha->chip_reset);
+ "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
+ vha->flags.online, qla2x00_reset_active(vha),
+ mcmd->reset_count, ha->chip_reset);
ha->tgt.tgt_ops->free_mcmd(mcmd);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return;
@@ -1578,7 +1756,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
0, 0, 0, 0, 0, 0);
else {
- if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
else
@@ -2487,7 +2665,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
/* no need to terminate. FW already freed exchange. */
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
else
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return 0;
}
@@ -2510,17 +2688,22 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ if (xmit_type == QLA_TGT_XMIT_STATUS)
+ vha->tgt_counters.core_qla_snd_status++;
+ else
+ vha->tgt_counters.core_qla_que_buf++;
+
+ if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
/*
- * Either a chip reset is active or this request was from
+ * Either the port is not online or this request was from
* previous life, just abort the processing.
*/
cmd->state = QLA_TGT_STATE_PROCESSED;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
ql_dbg(ql_dbg_async, vha, 0xe101,
- "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
- qla2x00_reset_active(vha), cmd->reset_count,
- ha->chip_reset);
+ "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
+ vha->flags.online, qla2x00_reset_active(vha),
+ cmd->reset_count, ha->chip_reset);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return 0;
}
@@ -2651,18 +2834,18 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
+ if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
(cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
/*
- * Either a chip reset is active or this request was from
+ * Either the port is not online or this request was from
* previous life, just abort the processing.
*/
cmd->state = QLA_TGT_STATE_NEED_DATA;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
ql_dbg(ql_dbg_async, vha, 0xe102,
- "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
- qla2x00_reset_active(vha), cmd->reset_count,
- ha->chip_reset);
+ "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
+ vha->flags.online, qla2x00_reset_active(vha),
+ cmd->reset_count, ha->chip_reset);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return 0;
}
@@ -2957,12 +3140,13 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ret = 1;
}
+ vha->tgt_counters.num_term_xchg_sent++;
pkt->entry_count = 1;
pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
@@ -2989,7 +3173,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
}
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
- struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
+ int ul_abort)
{
unsigned long flags = 0;
int rc;
@@ -3009,8 +3194,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
done:
- if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
- !cmd->cmd_sent_to_fw)) {
+ if (cmd && !ul_abort && !cmd->aborted) {
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
@@ -3028,7 +3212,7 @@ static void qlt_init_term_exchange(struct scsi_qla_host *vha)
struct qla_tgt_cmd *cmd, *tcmd;
vha->hw->tgt.leak_exchg_thresh_hold =
- (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
+ (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
cmd = tcmd = NULL;
if (!list_empty(&vha->hw->tgt.q_full_list)) {
@@ -3058,7 +3242,7 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
ql_dbg(ql_dbg_tgt, vha, 0xe079,
"Chip reset due to exchange starvation: %d/%d.\n",
- total_leaked, vha->hw->fw_xcb_count);
+ total_leaked, vha->hw->cur_fw_xcb_count);
if (IS_P3P_TYPE(vha->hw))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -3069,21 +3253,38 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
}
-void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
struct qla_tgt *tgt = cmd->tgt;
struct scsi_qla_host *vha = tgt->vha;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
"qla_target(%d): terminating exchange for aborted cmd=%p "
"(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
se_cmd->tag);
- cmd->state = QLA_TGT_STATE_ABORTED;
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ /*
+ * It's normal to see 2 calls in this path:
+ * 1) XFER Rdy completion + CMD_T_ABORT
+ * 2) TCM TMR - drain_state_list
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "multiple abort. %p transport_state %x, t_state %x,"
+ " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
+ return EIO;
+ }
+ cmd->aborted = 1;
cmd->cmd_flags |= BIT_6;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
+ return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
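
The hunk above replaces the old QLA_TGT_STATE_ABORTED state with a per-command aborted flag guarded by the new cmd_lock (initialized in __qlt_do_work() later in this patch), so a second abort request, e.g. an XFER-RDY completion racing a TCM TMR, is detected and reported instead of terminating the exchange twice. A minimal sketch of that guard; demo_cmd and demo_abort_cmd are stand-ins, not driver symbols, and the error value is illustrative.

#include <linux/spinlock.h>
#include <linux/errno.h>

struct demo_cmd {
	spinlock_t lock;		/* protects the aborted flag */
	unsigned int aborted:1;		/* set once, by the first abort */
};

static int demo_abort_cmd(struct demo_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->lock, flags);
	if (cmd->aborted) {
		/* second abort (e.g. TMR after XFER-RDY): nothing left to do */
		spin_unlock_irqrestore(&cmd->lock, flags);
		return -EIO;
	}
	cmd->aborted = 1;
	spin_unlock_irqrestore(&cmd->lock, flags);

	/* only the first caller reaches the terminate-exchange path */
	return 0;
}
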
@@ -3098,6 +3299,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
BUG_ON(cmd->cmd_in_wq);
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(cmd->vha, cmd);
+
if (!cmd->q_full)
qlt_decr_num_pend_cmds(cmd->vha);
@@ -3215,7 +3419,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
term = 1;
if (term)
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
return term;
}
@@ -3300,9 +3504,6 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
ha->tgt.tgt_ops->handle_data(cmd);
return;
- } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
- ql_dbg(ql_dbg_io, vha, 0xff02,
- "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
} else {
ql_dbg(ql_dbg_io, vha, 0xff03,
"HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
@@ -3398,13 +3599,27 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
case CTIO_PORT_LOGGED_OUT:
case CTIO_PORT_UNAVAILABLE:
+ {
+ int logged_out =
+ (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
- "qla_target(%d): CTIO with PORT LOGGED "
- "OUT (29) or PORT UNAVAILABLE (28) status %x "
+ "qla_target(%d): CTIO with %s status %x "
"received (state %x, se_cmd %p)\n", vha->vp_idx,
+ logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
status, cmd->state, se_cmd);
- break;
+ if (logged_out && cmd->sess) {
+ /*
+ * Session is already logged out, but we need
+ * to notify the initiator, who is not aware of this.
+ */
+ cmd->sess->logout_on_delete = 0;
+ cmd->sess->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(cmd->sess, true);
+ }
+ break;
+ }
case CTIO_SRR_RECEIVED:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
"qla_target(%d): CTIO with SRR_RECEIVED"
@@ -3454,14 +3669,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
}
- /* "cmd->state == QLA_TGT_STATE_ABORTED" means
+ /* "cmd->aborted" means
* cmd is already aborted/terminated, we don't
* need to terminate again. The exchange is already
* cleaned up/freed at FW level. Just cleanup at driver
* level.
*/
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
- (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ (!cmd->aborted)) {
cmd->cmd_flags |= BIT_13;
if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
return;
@@ -3479,7 +3694,7 @@ skip_term:
ha->tgt.tgt_ops->handle_data(cmd);
return;
- } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ } else if (cmd->aborted) {
cmd->cmd_flags |= BIT_18;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
"Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
@@ -3491,7 +3706,7 @@ skip_term:
}
if (unlikely(status != CTIO_SUCCESS) &&
- (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ !cmd->aborted) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
dump_stack();
}
@@ -3553,13 +3768,14 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
if (tgt->tgt_stop)
goto out_term;
- if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ if (cmd->aborted) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
"cmd with tag %u is aborted\n",
cmd->atio.u.isp24.exchange_addr);
goto out_term;
}
+ spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int(
@@ -3589,9 +3805,9 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
/*
* Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
*/
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
out_term:
@@ -3602,12 +3818,15 @@ out_term:
*/
cmd->cmd_flags |= BIT_2;
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
- ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static void qlt_do_work(struct work_struct *work)
@@ -3692,10 +3911,8 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
goto out_term;
}
- mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has an extra creation ref. */
- mutex_unlock(&vha->vha_tgt.tgt_mutex);
if (!sess)
goto out_term;
@@ -3723,7 +3940,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &op->atio, 1);
+ qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
@@ -3787,13 +4004,24 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
cmd->cmd_in_wq = 1;
cmd->cmd_flags |= BIT_0;
+ cmd->se_cmd.cpuid = ha->msix_count ?
+ ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
spin_lock(&vha->cmd_list_lock);
list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
spin_unlock(&vha->cmd_list_lock);
INIT_WORK(&cmd->work, qlt_do_work);
- queue_work(qla_tgt_wq, &cmd->work);
+ if (ha->msix_count) {
+ if (cmd->atio.u.isp24.fcp_cmnd.rddata)
+ queue_work_on(smp_processor_id(), qla_tgt_wq,
+ &cmd->work);
+ else
+ queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
+ &cmd->work);
+ } else {
+ queue_work(qla_tgt_wq, &cmd->work);
+ }
return 0;
}
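
The queueing change above pins command work either to the CPU that received the ATIO (for reads) or to the CPU serving the response-queue MSI-X vector, and falls back to an unbound queue_work() when MSI-X is not in use. A rough sketch of that selection, assuming invented names (demo_queue_cmd, rspq_cpu, is_read) in place of the driver's ha->msix_count, ha->tgt.rspq_vector_cpuid and the fcp_cmnd.rddata test; like the driver, it is meant to run from interrupt context, where smp_processor_id() is stable.

#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/types.h>

static void demo_queue_cmd(struct workqueue_struct *wq,
			   struct work_struct *work,
			   int msix_count, int rspq_cpu, bool is_read)
{
	if (!msix_count) {
		/* no MSI-X: let the scheduler pick a CPU */
		queue_work(wq, work);
		return;
	}

	if (is_read)
		/* reads: stay on the CPU that took the ATIO interrupt */
		queue_work_on(smp_processor_id(), wq, work);
	else
		/* writes/status: follow the response-queue vector's CPU */
		queue_work_on(rspq_cpu, wq, work);
}
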
@@ -3917,13 +4145,18 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
struct qla_tgt_sess *sess;
uint32_t lun, unpacked_lun;
int fn;
+ unsigned long flags;
tgt = vha->vha_tgt.qla_tgt;
lun = a->u.isp24.fcp_cmnd.lun;
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
a->u.isp24.fcp_hdr.s_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
if (!sess) {
@@ -3987,10 +4220,14 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess;
int loop_id;
+ unsigned long flags;
loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
if (sess == NULL) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
"qla_target(%d): task abort for unexisting "
@@ -4022,15 +4259,6 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
}
}
-static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
- struct imm_ntfy_from_isp *b)
-{
- struct imm_ntfy_from_isp tmp;
- memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
- memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
- memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
-}
-
/*
* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
*
@@ -4040,11 +4268,13 @@ static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
*/
static struct qla_tgt_sess *
qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
- port_id_t port_id, uint16_t loop_id)
+ port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess)
{
struct qla_tgt_sess *sess = NULL, *other_sess;
uint64_t other_wwn;
+ *conflict_sess = NULL;
+
list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
other_wwn = wwn_to_u64(other_sess->port_name);
@@ -4072,9 +4302,10 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
} else {
/*
* Another wwn used to have our s_id/loop_id
- * combo - kill the session, but don't log out
+ * kill the session, but don't free the loop_id
*/
- sess->logout_on_delete = 0;
+ other_sess->keep_nport_handle = 1;
+ *conflict_sess = other_sess;
qlt_schedule_sess_for_deletion(other_sess,
true);
}
@@ -4119,7 +4350,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
if (cmd_key == key) {
- cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->aborted = 1;
count++;
}
}
@@ -4136,12 +4367,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL;
+ struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL;
uint64_t wwn;
port_id_t port_id;
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
+ qlt_plogi_ack_t *pla;
+ unsigned long flags;
wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4165,27 +4398,20 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
/* Mark all stale commands in qla_tgt_wq for deletion */
abort_cmds_for_s_id(vha, &port_id);
- if (wwn)
+ if (wwn) {
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
sess = qlt_find_sess_invalidate_other(tgt, wwn,
- port_id, loop_id);
+ port_id, loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
+ }
- if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
+ if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) {
res = 1;
break;
}
- if (sess->plogi_ack_needed) {
- /*
- * Initiator sent another PLOGI before last PLOGI could
- * finish. Swap plogi iocbs and terminate old one
- * without acking, new one will get acked when session
- * deletion completes.
- */
- ql_log(ql_log_warn, sess->vha, 0xf094,
- "sess %p received double plogi.\n", sess);
-
- qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
-
+ pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
+ if (!pla) {
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
@@ -4194,13 +4420,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = 0;
- /*
- * Save immediate Notif IOCB for Ack when sess is done
- * and being deleted.
- */
- memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
- sess->plogi_ack_needed = 1;
+ if (conflict_sess)
+ qlt_plogi_ack_link(vha, pla, conflict_sess,
+ QLT_PLOGI_LINK_CONFLICT);
+
+ if (!sess)
+ break;
+ qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
/*
* Under normal circumstances we want to release nport handle
* during LOGO process to avoid nport handle leaks inside FW.
@@ -4227,9 +4454,21 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
case ELS_PRLI:
wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
- if (wwn)
+ if (wwn) {
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
- loop_id);
+ loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
+ }
+
+ if (conflict_sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+ "PRLI with conflicting sess %p port %8phC\n",
+ conflict_sess, conflict_sess->port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
if (sess != NULL) {
if (sess->deleted) {
@@ -4554,7 +4793,7 @@ out_reject:
dump_stack();
} else {
cmd->cmd_flags |= BIT_9;
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -4733,7 +4972,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
sctio, sctio->srr_id);
list_del(&sctio->srr_list_entry);
qlt_send_term_exchange(vha, sctio->cmd,
- &sctio->cmd->atio, 1);
+ &sctio->cmd->atio, 1, 0);
kfree(sctio);
}
}
@@ -4899,11 +5138,14 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
atio->u.isp24.fcp_hdr.s_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
@@ -4916,6 +5158,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
return -ENOMEM;
}
+ vha->tgt_counters.num_q_full_sent++;
pkt->entry_count = 1;
pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
@@ -5129,11 +5372,12 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+ struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
+ unsigned long flags;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_io, vha, 0x3064,
@@ -5145,7 +5389,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
* Otherwise, some commands can stuck.
*/
- tgt->irq_cmd_count++;
+ tgt->atio_irq_cmd_count++;
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
@@ -5155,7 +5399,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
"sending QUEUE_FULL\n", vha->vp_idx);
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
break;
}
@@ -5164,7 +5412,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
rc = qlt_chk_qfull_thresh_hold(vha, atio);
if (rc != 0) {
- tgt->irq_cmd_count--;
+ tgt->atio_irq_cmd_count--;
return;
}
rc = qlt_handle_cmd_for_atio(vha, atio);
@@ -5173,11 +5421,20 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
}
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
+ if (!ha_locked)
+ spin_lock_irqsave
+ (&ha->hardware_lock, flags);
+
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
#endif
+
+ if (!ha_locked)
+ spin_unlock_irqrestore
+ (&ha->hardware_lock, flags);
+
} else {
if (tgt->tgt_stop) {
ql_dbg(ql_dbg_tgt, vha, 0xe059,
@@ -5189,7 +5446,13 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"qla_target(%d): Unable to send "
"command to target, sending BUSY "
"status.\n", vha->vp_idx);
+ if (!ha_locked)
+ spin_lock_irqsave(
+ &ha->hardware_lock, flags);
qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ if (!ha_locked)
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
}
}
}
@@ -5206,7 +5469,12 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
break;
}
ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
break;
}
@@ -5217,7 +5485,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
break;
}
- tgt->irq_cmd_count--;
+ tgt->atio_irq_cmd_count--;
}
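
With the ATIO queue now serialized by ha->tgt.atio_lock, qlt_24xx_atio_pkt() only takes ha->hardware_lock around the few calls that still need it, and the new ha_locked argument avoids double-locking when a caller already holds the hardware lock. A small sketch of that conditional-lock convention; the helper and its callback are invented for illustration.

#include <linux/spinlock.h>
#include <linux/types.h>

static void demo_call_under_hw_lock(spinlock_t *hw_lock, bool ha_locked,
				    void (*fn)(void *), void *arg)
{
	unsigned long flags = 0;

	if (!ha_locked)
		spin_lock_irqsave(hw_lock, flags);

	fn(arg);	/* e.g. a send-busy or immediate-notify handler */

	if (!ha_locked)
		spin_unlock_irqrestore(hw_lock, flags);
}
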
/* ha->hardware_lock supposed to be held on entry */
@@ -5277,7 +5545,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
qlt_send_busy(vha, atio, 0);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
#endif
} else {
if (tgt->tgt_stop) {
@@ -5286,7 +5554,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
"command to target, sending TERM "
"EXCHANGE for rsp\n");
qlt_send_term_exchange(vha, NULL,
- atio, 1);
+ atio, 1, 0);
} else {
ql_dbg(ql_dbg_tgt, vha, 0xe060,
"qla_target(%d): Unable to send "
@@ -5534,12 +5802,16 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
int rc, global_resets;
uint16_t loop_id = 0;
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+
retry:
global_resets =
atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
if (rc != 0) {
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
if ((s_id[0] == 0xFF) &&
(s_id[1] == 0xFC)) {
/*
@@ -5550,17 +5822,27 @@ retry:
"Unable to find initiator with S_ID %x:%x:%x",
s_id[0], s_id[1], s_id[2]);
} else
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
+ ql_log(ql_log_info, vha, 0xf071,
"qla_target(%d): Unable to find "
"initiator with S_ID %x:%x:%x",
vha->vp_idx, s_id[0], s_id[1],
s_id[2]);
+
+ if (rc == -ENOENT) {
+ qlt_port_logo_t logo;
+ sid_to_portid(s_id, &logo.id);
+ logo.cmd_count = 1;
+ qlt_send_first_logo(vha, &logo);
+ }
+
return NULL;
}
fcport = qlt_get_port_database(vha, loop_id);
- if (!fcport)
+ if (!fcport) {
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
return NULL;
+ }
if (global_resets !=
atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
@@ -5575,6 +5857,8 @@ retry:
sess = qlt_create_sess(vha, fcport, true);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
kfree(fcport);
return sess;
}
@@ -5585,15 +5869,15 @@ static void qlt_abort_work(struct qla_tgt *tgt,
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess = NULL;
- unsigned long flags;
+ unsigned long flags = 0, flags2 = 0;
uint32_t be_s_id;
uint8_t s_id[3];
int rc;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
if (tgt->tgt_stop)
- goto out_term;
+ goto out_term2;
s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
@@ -5602,41 +5886,47 @@ static void qlt_abort_work(struct qla_tgt *tgt,
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
(unsigned char *)&be_s_id);
if (!sess) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
- mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has got an extra creation ref */
- mutex_unlock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
if (!sess)
- goto out_term;
+ goto out_term2;
} else {
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
sess = NULL;
- goto out_term;
+ goto out_term2;
}
kref_get(&sess->se_sess->sess_kref);
}
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
if (tgt->tgt_stop)
goto out_term;
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
if (rc != 0)
goto out_term;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
+out_term2:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
out_term:
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5653,7 +5943,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
int fn;
void *iocb;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (tgt->tgt_stop)
goto out_term;
@@ -5661,14 +5951,12 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
if (!sess) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has got an extra creation ref */
- mutex_unlock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (!sess)
goto out_term;
} else {
@@ -5690,14 +5978,14 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
goto out_term;
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
out_term:
- qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
+ qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
@@ -6002,6 +6290,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
if (!tgt) {
ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -6020,6 +6309,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qla24xx_disable_vp(vha);
qla24xx_enable_vp(vha);
} else {
+ if (ha->msix_entries) {
+ ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ "%s: host%ld : vector %d cpu %d\n",
+ __func__, vha->host_no,
+ ha->msix_entries[rspq_ent].vector,
+ ha->msix_entries[rspq_ent].cpuid);
+
+ ha->tgt.rspq_vector_cpuid =
+ ha->msix_entries[rspq_ent].cpuid;
+ }
+
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
qla2x00_wait_for_hba_online(base_vha);
@@ -6131,7 +6431,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
* @ha: SCSI driver HA context
*/
void
-qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
struct atio_from_isp *pkt;
@@ -6144,7 +6444,8 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
cnt = pkt->u.raw.entry_count;
- qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+ qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
+ ha_locked);
for (i = 0; i < cnt; i++) {
ha->tgt.atio_ring_index++;
@@ -6265,10 +6566,21 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
{
struct qla_hw_data *ha = vha->hw;
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
if (ha->tgt.node_name_set) {
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
+
+ /* disable ZIO at start time. */
+ if (!vha->flags.init_done) {
+ uint32_t tmp;
+ tmp = le32_to_cpu(icb->firmware_options_2);
+ tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ icb->firmware_options_2 = cpu_to_le32(tmp);
+ }
}
void
@@ -6359,6 +6671,15 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
+
+ /* disable ZIO at start time. */
+ if (!vha->flags.init_done) {
+ uint32_t tmp;
+ tmp = le32_to_cpu(icb->firmware_options_2);
+ tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
+ icb->firmware_options_2 = cpu_to_le32(tmp);
+ }
+
}
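
Both NVRAM stage-2 hunks above clear the low four bits of firmware_options_2 on first init, disabling the firmware's ZIO mode before target mode brings the interface up. A tiny sketch of the bit manipulation with the little-endian conversion; fw_options_2 is a stand-in argument name for icb->firmware_options_2.

#include <linux/types.h>
#include <asm/byteorder.h>

static __le32 demo_disable_zio(__le32 fw_options_2)
{
	u32 tmp = le32_to_cpu(fw_options_2);

	tmp &= ~0xfU;			/* bits 3:0 select the ZIO mode */
	return cpu_to_le32(tmp);	/* 0 = ZIO disabled */
}
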
void
@@ -6428,16 +6749,59 @@ qla83xx_msix_atio_q(int irq, void *dev_id)
ha = rsp->hw;
vha = pci_get_drvdata(ha->pdev);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
- qlt_24xx_process_atio_queue(vha);
- qla24xx_process_response_queue(vha, rsp);
+ qlt_24xx_process_atio_queue(vha, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
return IRQ_HANDLED;
}
+static void
+qlt_handle_abts_recv_work(struct work_struct *work)
+{
+ struct qla_tgt_sess_op *op = container_of(work,
+ struct qla_tgt_sess_op, work);
+ scsi_qla_host_t *vha = op->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
+ return;
+
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
+{
+ struct qla_tgt_sess_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_ATOMIC);
+
+ if (!op) {
+ /* do not reach for the ATIO queue here. This is best-effort
+ * error recovery at this point.
+ */
+ qlt_response_pkt_all_vps(vha, pkt);
+ return;
+ }
+
+ memcpy(&op->atio, pkt, sizeof(*pkt));
+ op->vha = vha;
+ op->chip_reset = vha->hw->chip_reset;
+ INIT_WORK(&op->work, qlt_handle_abts_recv_work);
+ queue_work(qla_tgt_wq, &op->work);
+ return;
+}
+
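
qlt_handle_abts_recv() above copies the IRQ-delivered packet into a work item stamped with the current chip_reset, so the deferred handler can discard it if a chip reset happened in between. A simplified sketch of that defer-and-check pattern; struct demo_op, demo_defer_pkt() and current_chip_reset are invented, and the driver's actual ATIO/response processing is reduced to a comment.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_op {
	struct work_struct work;
	u32 chip_reset;		/* generation stamped at enqueue time */
	u8 pkt[64];		/* copy of the IRQ-delivered packet */
};

static u32 current_chip_reset;	/* stands in for ha->chip_reset */

static void demo_op_work(struct work_struct *work)
{
	struct demo_op *op = container_of(work, struct demo_op, work);

	if (op->chip_reset != current_chip_reset) {
		kfree(op);	/* stale: a chip reset happened meanwhile */
		return;
	}
	/* ... process op->pkt under the appropriate locks ... */
	kfree(op);
}

static void demo_defer_pkt(struct workqueue_struct *wq,
			   const void *pkt, size_t len)
{
	struct demo_op *op = kzalloc(sizeof(*op), GFP_ATOMIC);

	if (!op)
		return;		/* best effort: drop on allocation failure */

	memcpy(op->pkt, pkt, min(len, sizeof(op->pkt)));
	op->chip_reset = current_chip_reset;
	INIT_WORK(&op->work, demo_op_work);
	queue_work(wq, &op->work);
}
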
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
@@ -6532,13 +6896,25 @@ int __init qlt_init(void)
return -ENOMEM;
}
+ qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
+ sizeof(qlt_plogi_ack_t),
+ __alignof__(qlt_plogi_ack_t),
+ 0, NULL);
+
+ if (!qla_tgt_plogi_cachep) {
+ ql_log(ql_log_fatal, NULL, 0xe06d,
+ "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
+ ret = -ENOMEM;
+ goto out_mgmt_cmd_cachep;
+ }
+
qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
if (!qla_tgt_mgmt_cmd_mempool) {
ql_log(ql_log_fatal, NULL, 0xe06e,
"mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
ret = -ENOMEM;
- goto out_mgmt_cmd_cachep;
+ goto out_plogi_cachep;
}
qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
@@ -6555,6 +6931,8 @@ int __init qlt_init(void)
out_cmd_mempool:
mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_plogi_cachep:
+ kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
return ret;
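
The init hunk above inserts a second kmem_cache (for the PLOGI-ack entries) between the mgmt-cmd cache and the mempool, and rewires the error unwinding so each failure label releases exactly what was created before it. A generic sketch of that create/unwind ordering; the cache names, object sizes and mempool depth are made up.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/errno.h>

static struct kmem_cache *demo_cmd_cachep;
static struct kmem_cache *demo_plogi_cachep;
static mempool_t *demo_cmd_pool;

static int __init demo_target_init(void)
{
	demo_cmd_cachep = kmem_cache_create("demo_cmd_cache", 128, 0, 0, NULL);
	if (!demo_cmd_cachep)
		return -ENOMEM;

	demo_plogi_cachep = kmem_cache_create("demo_plogi_cache", 64, 0, 0, NULL);
	if (!demo_plogi_cachep)
		goto out_cmd_cachep;

	demo_cmd_pool = mempool_create(25, mempool_alloc_slab,
				       mempool_free_slab, demo_cmd_cachep);
	if (!demo_cmd_pool)
		goto out_plogi_cachep;

	return 0;

out_plogi_cachep:
	kmem_cache_destroy(demo_plogi_cachep);
out_cmd_cachep:
	kmem_cache_destroy(demo_cmd_cachep);
	return -ENOMEM;
}

The matching module exit path then destroys in the reverse order, as the qlt_exit() hunk does.
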
@@ -6567,5 +6945,6 @@ void qlt_exit(void)
destroy_workqueue(qla_tgt_wq);
mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+ kmem_cache_destroy(qla_tgt_plogi_cachep);
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index bca584ae4..22a6a767f 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -787,7 +787,7 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
-#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
+
/* Special handles */
#define QLA_TGT_NULL_HANDLE 0
@@ -835,6 +835,7 @@ struct qla_tgt {
* HW lock.
*/
int irq_cmd_count;
+ int atio_irq_cmd_count;
int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
@@ -883,6 +884,7 @@ struct qla_tgt {
struct qla_tgt_sess_op {
struct scsi_qla_host *vha;
+ uint32_t chip_reset;
struct atio_from_isp atio;
struct work_struct work;
struct list_head cmd_list;
@@ -896,6 +898,19 @@ enum qla_sess_deletion {
QLA_SESS_DELETION_IN_PROGRESS = 2,
};
+typedef enum {
+ QLT_PLOGI_LINK_SAME_WWN,
+ QLT_PLOGI_LINK_CONFLICT,
+ QLT_PLOGI_LINK_MAX
+} qlt_plogi_link_t;
+
+typedef struct {
+ struct list_head list;
+ struct imm_ntfy_from_isp iocb;
+ port_id_t id;
+ int ref_count;
+} qlt_plogi_ack_t;
+
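
The new qlt_plogi_ack_t above tracks one pending PLOGI per port id and is reference-counted by the sessions that link to it (same-WWN or conflicting); judging by the unref calls in qlt_free_session_done(), the deferred ACK is only issued once the last reference is dropped. A loose sketch of that find-or-add / link / unref lifecycle; all names are invented, locking is omitted, and the NOTIFY-ACK IOCB is reduced to a comment.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_plogi_ack {
	struct list_head list;
	u32 port_id;
	int ref_count;
};

static struct demo_plogi_ack *demo_plogi_find_add(struct list_head *head,
						  u32 port_id)
{
	struct demo_plogi_ack *pla;

	list_for_each_entry(pla, head, list)
		if (pla->port_id == port_id)
			return pla;	/* reuse the pending entry */

	pla = kzalloc(sizeof(*pla), GFP_ATOMIC);
	if (pla) {
		pla->port_id = port_id;
		list_add_tail(&pla->list, head);
	}
	return pla;
}

/* a session (same WWN or the conflicting one) pins the pending PLOGI */
static void demo_plogi_link(struct demo_plogi_ack *pla)
{
	pla->ref_count++;
}

/* called as each linked session goes away; last one out sends the ACK */
static void demo_plogi_unref(struct demo_plogi_ack *pla)
{
	if (--pla->ref_count > 0)
		return;

	/* ... issue the deferred NOTIFY ACK for this port id here ... */
	list_del(&pla->list);
	kfree(pla);
}
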
/*
* Equivilant to IT Nexus (Initiator-Target)
*/
@@ -907,8 +922,8 @@ struct qla_tgt_sess {
unsigned int deleted:2;
unsigned int local:1;
unsigned int logout_on_delete:1;
- unsigned int plogi_ack_needed:1;
unsigned int keep_nport_handle:1;
+ unsigned int send_els_logo:1;
unsigned char logout_completed;
@@ -925,11 +940,39 @@ struct qla_tgt_sess {
uint8_t port_name[WWN_SIZE];
struct work_struct free_work;
- union {
- struct imm_ntfy_from_isp tm_iocb;
- };
+ qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
};
+typedef enum {
+ /*
+ * BIT_0 - Atio Arrival / schedule to work
+ * BIT_1 - qlt_do_work
+ * BIT_2 - qlt_do_work failed
+ * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
+ * BIT_4 - read response/tcm_qla2xxx_queue_data_in
+ * BIT_5 - status response / tcm_qla2xxx_queue_status
+ * BIT_6 - tcm request to abort/Term exchange.
+ * pre_xmit_response->qlt_send_term_exchange
+ * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
+ * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+ * BIT_9 - SRR received (qlt_handle_srr->qlt_send_term_exchange)
+ * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data
+ *
+ * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
+ * BIT_13 - Bad completion -
+ * qlt_ctio_do_completion --> qlt_term_ctio_exchange
+ * BIT_14 - Back end data received/sent.
+ * BIT_15 - SRR prepare ctio
+ * BIT_16 - complete free
+ * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+ * BIT_18 - completion w/abort status
+ * BIT_19 - completion w/unknown status
+ * BIT_20 - tcm_qla2xxx_free_cmd
+ */
+ CMD_FLAG_DATA_WORK = BIT_11,
+ CMD_FLAG_DATA_WORK_FREE = BIT_21,
+} cmd_flags_t;
+
struct qla_tgt_cmd {
struct se_cmd se_cmd;
struct qla_tgt_sess *sess;
@@ -939,6 +982,7 @@ struct qla_tgt_cmd {
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+ spinlock_t cmd_lock;
/* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
@@ -949,6 +993,7 @@ struct qla_tgt_cmd {
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
+ unsigned int aborted:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -972,30 +1017,8 @@ struct qla_tgt_cmd {
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
- /* BIT_0 - Atio Arrival / schedule to work
- * BIT_1 - qlt_do_work
- * BIT_2 - qlt_do work failed
- * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
- * BIT_4 - read respond/tcm_qla2xx_queue_data_in
- * BIT_5 - status respond / tcm_qla2xx_queue_status
- * BIT_6 - tcm request to abort/Term exchange.
- * pre_xmit_response->qlt_send_term_exchange
- * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
- * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
- * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
- * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
- * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
- * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
- * BIT_13 - Bad completion -
- * qlt_ctio_do_completion --> qlt_term_ctio_exchange
- * BIT_14 - Back end data received/sent.
- * BIT_15 - SRR prepare ctio
- * BIT_16 - complete free
- * BIT_17 - flush - qlt_abort_cmd_on_host_reset
- * BIT_18 - completion w/abort status
- * BIT_19 - completion w/unknown status
- */
- uint32_t cmd_flags;
+
+ cmd_flags_t cmd_flags;
};
struct qla_tgt_sess_work_param {
@@ -1120,13 +1143,21 @@ static inline uint32_t sid_to_key(const uint8_t *s_id)
return key;
}
+static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
+{
+ memset(p, 0, sizeof(*p));
+ p->b.domain = s_id[0];
+ p->b.area = s_id[1];
+ p->b.al_pa = s_id[2];
+}
+
/*
* Exported symbols from qla_target.c LLD logic used by qla2xxx code..
*/
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
-extern void qlt_abort_cmd(struct qla_tgt_cmd *);
+extern int qlt_abort_cmd(struct qla_tgt_cmd *);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1135,7 +1166,7 @@ extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
-extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
extern void qlt_24xx_config_rings(struct scsi_qla_host *);
extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_24xx *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 81af294f1..1808a01cf 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -284,6 +284,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
WARN_ON(cmd->cmd_flags & BIT_16);
+ cmd->vha->tgt_counters.qla_core_ret_sta_ctio++;
cmd->cmd_flags |= BIT_16;
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@@ -295,9 +296,14 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
+ cmd->vha->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
+
+ BUG_ON(cmd->cmd_flags & BIT_20);
+ cmd->cmd_flags |= BIT_20;
+
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
- queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
/*
@@ -342,9 +348,9 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
BUG_ON(!sess);
vha = sess->vha;
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
target_sess_cmd_list_set_waiting(se_sess);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return 1;
}
@@ -358,9 +364,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
BUG_ON(!sess);
vha = sess->vha;
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
qlt_unreg_sess(sess);
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
@@ -372,6 +378,20 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+
+ if (cmd->aborted) {
+ /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+ * can get ahead of this cmd. tcm_qla2xxx_aborted_task
+ * already kick start the free.
+ */
+ pr_debug("write_pending aborted cmd[%p] refcount %d "
+ "transport_state %x, t_state %x, se_cmd_flags %x\n",
+ cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+ cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
+ return 0;
+ }
cmd->cmd_flags |= BIT_3;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -403,7 +423,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
- 3 * HZ);
+ 50);
return 0;
}
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -442,6 +462,9 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
if (bidi)
flags |= TARGET_SCF_BIDI_OP;
+ if (se_cmd->cpuid != WORK_CPU_UNBOUND)
+ flags |= TARGET_SCF_USE_CPUID;
+
sess = cmd->sess;
if (!sess) {
pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
@@ -454,6 +477,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
return -EINVAL;
}
+ cmd->vha->tgt_counters.qla_core_sbt_cmd++;
return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
cmd->unpacked_lun, data_length, fcp_task_attr,
data_dir, flags);
@@ -462,13 +486,26 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
*/
cmd->cmd_in_wq = 0;
- cmd->cmd_flags |= BIT_11;
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
+ if (cmd->aborted) {
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ cmd->vha->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
* Check if se_cmd has already been aborted via LUN_RESET, and
@@ -500,7 +537,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
cmd->cmd_flags |= BIT_10;
cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
- queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
@@ -542,6 +579,20 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+ if (cmd->aborted) {
+ /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+ * can get ahead of this cmd; in that case it has already
+ * kick-started the free.
+ */
+ pr_debug("queue_data_in aborted cmd[%p] refcount %d "
+ "transport_state %x, t_state %x, se_cmd_flags %x\n",
+ cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+ cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
+ return 0;
+ }
+
cmd->cmd_flags |= BIT_4;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -633,17 +684,40 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
qlt_xmit_tm_rsp(mcmd);
}
+
+#define DATA_WORK_NOT_FREE(_flags) \
+ (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \
+ CMD_FLAG_DATA_WORK)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- qlt_abort_cmd(cmd);
+ unsigned long flags;
+
+ if (qlt_abort_cmd(cmd))
+ return;
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if ((cmd->state == QLA_TGT_STATE_NEW)||
+ ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
+ DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) {
+
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ /* Cmd has not reached firmware.
+ * Use this trigger to free it. */
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ return;
+
}
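
tcm_qla2xxx_aborted_task() and tcm_qla2xxx_handle_data_work() above resolve their race through the two new cmd_flags bits: one records that the write-data work has run, the other that a path has already taken ownership of the free, so exactly one of the two sides hands the command back. A condensed sketch of that handoff under a per-command lock; demo_wcmd and its helpers are invented, demo_release() stands in for the real free, and the non-aborted completion paths are elided.

#include <linux/spinlock.h>

#define DEMO_DATA_WORK		0x1	/* data work handler has run */
#define DEMO_DATA_WORK_FREE	0x2	/* someone already owns the free */

struct demo_wcmd {
	spinlock_t lock;
	unsigned int flags;
	unsigned int aborted:1;
};

static void demo_release(struct demo_wcmd *cmd)
{
	/* hand the command back to the core for freeing */
}

static void demo_data_work(struct demo_wcmd *cmd)
{
	unsigned long irqflags;

	spin_lock_irqsave(&cmd->lock, irqflags);
	cmd->flags |= DEMO_DATA_WORK;
	if (cmd->aborted) {
		/* abort already ran: this side owns the free */
		cmd->flags |= DEMO_DATA_WORK_FREE;
		spin_unlock_irqrestore(&cmd->lock, irqflags);
		demo_release(cmd);
		return;
	}
	spin_unlock_irqrestore(&cmd->lock, irqflags);
	/* ... continue the normal write-data completion path ... */
}

static void demo_aborted_task(struct demo_wcmd *cmd)
{
	unsigned long irqflags;

	spin_lock_irqsave(&cmd->lock, irqflags);
	if ((cmd->flags & (DEMO_DATA_WORK | DEMO_DATA_WORK_FREE)) ==
	    DEMO_DATA_WORK) {
		/* data work ran but nobody freed yet: abort side frees */
		cmd->flags |= DEMO_DATA_WORK_FREE;
		spin_unlock_irqrestore(&cmd->lock, irqflags);
		demo_release(cmd);
		return;
	}
	spin_unlock_irqrestore(&cmd->lock, irqflags);
}
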
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
- * Expected to be called with struct qla_hw_data->hardware_lock held
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
@@ -697,13 +771,13 @@ static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
if (!sess)
return;
- assert_spin_locked(&sess->vha->hw->hardware_lock);
+ assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
- assert_spin_locked(&sess->vha->hw->hardware_lock);
+ assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
target_sess_cmd_list_set_waiting(sess->se_sess);
}
@@ -1077,7 +1151,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
}
/*
- * Expected to be called with struct qla_hw_data->hardware_lock held
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
scsi_qla_host_t *vha,
@@ -1116,7 +1190,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
}
/*
- * Expected to be called with struct qla_hw_data->hardware_lock held
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static void tcm_qla2xxx_set_sess_by_s_id(
struct tcm_qla2xxx_lport *lport,
@@ -1182,7 +1256,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
}
/*
- * Expected to be called with struct qla_hw_data->hardware_lock held
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
scsi_qla_host_t *vha,
@@ -1221,7 +1295,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
}
/*
- * Expected to be called with struct qla_hw_data->hardware_lock held
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
static void tcm_qla2xxx_set_sess_by_loop_id(
struct tcm_qla2xxx_lport *lport,
@@ -1285,7 +1359,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
}
/*
- * Should always be called with qla_hw_data->hardware_lock held.
+ * Should always be called with qla_hw_data->tgt.sess_lock held.
*/
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
@@ -1353,7 +1427,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
struct qla_tgt_sess *sess = qla_tgt_sess;
unsigned char port_name[36];
unsigned long flags;
- int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
+ int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
TCM_QLA2XXX_DEFAULT_TAGS;
lport = vha->vha_tgt.target_lport_ptr;
@@ -1401,12 +1475,12 @@ static int tcm_qla2xxx_check_initiator_node_acl(
* And now setup the new se_nacl and session pointers into our HW lport
* mappings for fabric S_ID and LOOP_ID.
*/
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
qla_tgt_sess, s_id);
tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
qla_tgt_sess, loop_id);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
/*
* Finally register the new FC Nexus with TCM
*/