From d0b2f91bede3bd5e3d24dd6803e56eee959c1797 Mon Sep 17 00:00:00 2001 From: André Fabian Silva Delgado Date: Thu, 20 Oct 2016 00:10:27 -0300 Subject: Linux-libre 4.8.2-gnu --- drivers/scsi/53c700.c | 10 +- drivers/scsi/53c700.h | 15 +- drivers/scsi/Kconfig | 19 +- drivers/scsi/Makefile | 1 + drivers/scsi/aacraid/commctrl.c | 7 +- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 14 +- drivers/scsi/bnx2fc/bnx2fc_io.c | 2 +- drivers/scsi/bnx2i/bnx2i_hwi.c | 2 +- drivers/scsi/cxgbi/Makefile | 2 + drivers/scsi/cxgbi/cxgb3i/Kbuild | 1 + drivers/scsi/cxgbi/cxgb3i/Kconfig | 1 + drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 164 +- drivers/scsi/cxgbi/cxgb4i/Kbuild | 1 + drivers/scsi/cxgbi/cxgb4i/Kconfig | 1 + drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 203 +- drivers/scsi/cxgbi/libcxgbi.c | 734 ++---- drivers/scsi/cxgbi/libcxgbi.h | 188 +- drivers/scsi/cxlflash/main.c | 106 +- drivers/scsi/cxlflash/main.h | 6 +- drivers/scsi/cxlflash/sislite.h | 6 + drivers/scsi/fcoe/fcoe.c | 374 +-- drivers/scsi/fcoe/fcoe.h | 1 + drivers/scsi/fcoe/fcoe_ctlr.c | 245 +- drivers/scsi/fcoe/fcoe_sysfs.c | 39 + drivers/scsi/fcoe/fcoe_transport.c | 4 +- drivers/scsi/fnic/fnic_fcs.c | 14 +- drivers/scsi/fnic/fnic_fip.h | 8 - drivers/scsi/hisi_sas/hisi_sas.h | 2 +- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 73 +- drivers/scsi/hosts.c | 2 +- drivers/scsi/hpsa.c | 83 +- drivers/scsi/ibmvscsi/ibmvfc.c | 2 + drivers/scsi/ibmvscsi/ibmvfc.h | 2 +- drivers/scsi/ibmvscsi/ibmvscsi.h | 2 +- drivers/scsi/ibmvscsi_tgt/Makefile | 3 + drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 4087 ++++++++++++++++++++++++++++++ drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | 346 +++ drivers/scsi/ibmvscsi_tgt/libsrp.c | 427 ++++ drivers/scsi/ibmvscsi_tgt/libsrp.h | 123 + drivers/scsi/ipr.c | 26 +- drivers/scsi/ipr.h | 4 + drivers/scsi/libfc/fc_exch.c | 10 +- drivers/scsi/libfc/fc_lport.c | 24 +- drivers/scsi/libfc/fc_rport.c | 49 +- drivers/scsi/libsas/sas_ata.c | 12 +- drivers/scsi/lpfc/lpfc.h | 27 +- drivers/scsi/lpfc/lpfc_attr.c | 237 +- drivers/scsi/lpfc/lpfc_attr.h | 116 + drivers/scsi/lpfc/lpfc_crtn.h | 7 +- drivers/scsi/lpfc/lpfc_ct.c | 4 + drivers/scsi/lpfc/lpfc_els.c | 290 ++- drivers/scsi/lpfc/lpfc_hw.h | 36 +- drivers/scsi/lpfc/lpfc_hw4.h | 41 +- drivers/scsi/lpfc/lpfc_ids.h | 122 + drivers/scsi/lpfc/lpfc_init.c | 292 +-- drivers/scsi/lpfc/lpfc_scsi.c | 14 +- drivers/scsi/lpfc/lpfc_scsi.h | 3 +- drivers/scsi/lpfc/lpfc_sli.c | 116 +- drivers/scsi/lpfc/lpfc_sli.h | 3 +- drivers/scsi/lpfc/lpfc_sli4.h | 4 +- drivers/scsi/lpfc/lpfc_version.h | 2 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 27 +- drivers/scsi/mpt3sas/mpt3sas_transport.c | 5 + drivers/scsi/osd/osd_initiator.c | 37 +- drivers/scsi/pm8001/pm8001_init.c | 2 +- drivers/scsi/qla2xxx/qla_attr.c | 170 +- drivers/scsi/qla2xxx/qla_bsg.c | 93 +- drivers/scsi/qla2xxx/qla_bsg.h | 13 + drivers/scsi/qla2xxx/qla_dbg.c | 50 +- drivers/scsi/qla2xxx/qla_def.h | 12 +- drivers/scsi/qla2xxx/qla_fw.h | 2 +- drivers/scsi/qla2xxx/qla_gbl.h | 5 +- drivers/scsi/qla2xxx/qla_init.c | 39 +- drivers/scsi/qla2xxx/qla_isr.c | 31 +- drivers/scsi/qla2xxx/qla_mbx.c | 123 +- drivers/scsi/qla2xxx/qla_nx.h | 1 - drivers/scsi/qla2xxx/qla_os.c | 127 +- drivers/scsi/qla2xxx/qla_target.c | 16 +- drivers/scsi/qla2xxx/qla_tmpl.c | 9 +- drivers/scsi/qla2xxx/qla_version.h | 2 +- drivers/scsi/scsi_debug.c | 93 +- drivers/scsi/scsi_devinfo.c | 4 + drivers/scsi/scsi_priv.h | 1 + drivers/scsi/scsi_transport_sas.c | 16 - drivers/scsi/sd.c | 26 +- drivers/scsi/ses.c | 5 +- drivers/scsi/snic/snic_disc.c | 4 +- drivers/scsi/snic/snic_fwint.h | 2 +- drivers/scsi/sr.c | 3 +-
drivers/scsi/storvsc_drv.c | 2 + drivers/scsi/ufs/Kconfig | 16 + drivers/scsi/ufs/Makefile | 2 + drivers/scsi/ufs/tc-dwc-g210-pci.c | 181 ++ drivers/scsi/ufs/tc-dwc-g210-pltfrm.c | 113 + drivers/scsi/ufs/tc-dwc-g210.c | 319 +++ drivers/scsi/ufs/tc-dwc-g210.h | 19 + drivers/scsi/ufs/ufshcd-dwc.c | 154 ++ drivers/scsi/ufs/ufshcd-dwc.h | 26 + drivers/scsi/ufs/ufshcd-pltfrm.c | 2 +- drivers/scsi/ufs/ufshcd.c | 92 +- drivers/scsi/ufs/ufshcd.h | 7 + drivers/scsi/ufs/ufshci-dwc.h | 36 + drivers/scsi/ufs/ufshci.h | 11 + drivers/scsi/ufs/unipro.h | 39 + drivers/scsi/vmw_pvscsi.c | 2 +- drivers/scsi/vmw_pvscsi.h | 2 +- drivers/scsi/wd7000.c | 6 +- drivers/scsi/wd719x.c | 2 +- 108 files changed, 8610 insertions(+), 2098 deletions(-) create mode 100644 drivers/scsi/ibmvscsi_tgt/Makefile create mode 100644 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c create mode 100644 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h create mode 100644 drivers/scsi/ibmvscsi_tgt/libsrp.c create mode 100644 drivers/scsi/ibmvscsi_tgt/libsrp.h create mode 100644 drivers/scsi/lpfc/lpfc_attr.h create mode 100644 drivers/scsi/lpfc/lpfc_ids.h create mode 100644 drivers/scsi/ufs/tc-dwc-g210-pci.c create mode 100644 drivers/scsi/ufs/tc-dwc-g210-pltfrm.c create mode 100644 drivers/scsi/ufs/tc-dwc-g210.c create mode 100644 drivers/scsi/ufs/tc-dwc-g210.h create mode 100644 drivers/scsi/ufs/ufshcd-dwc.c create mode 100644 drivers/scsi/ufs/ufshcd-dwc.h create mode 100644 drivers/scsi/ufs/ufshci-dwc.h diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 3ddc85e6e..95e32a47f 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1120,9 +1120,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, "reselection is tag %d, slot %p(%d)\n", hostdata->msgin[2], slot, slot->tag); } else { - struct scsi_cmnd *SCp; + struct NCR_700_Device_Parameters *p = SDp->hostdata; + struct scsi_cmnd *SCp = p->current_cmnd; - SCp = SDp->current_cmnd; if(unlikely(SCp == NULL)) { sdev_printk(KERN_ERR, SDp, "no saved request for untagged cmd\n"); @@ -1825,9 +1825,11 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n", slot->tag, slot); } else { + struct NCR_700_Device_Parameters *p = SCp->device->hostdata; + slot->tag = SCSI_NO_TAG; /* save current command for reselection */ - SCp->device->current_cmnd = SCp; + p->current_cmnd = SCp; } /* sanity check: some of the commands generated by the mid-layer * have an eccentric idea of their sc_data_direction */ @@ -1892,7 +1894,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) slot->SG[i].ins = bS_to_host(SCRIPT_RETURN); slot->SG[i].pAddr = 0; dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE); - DEBUG((" SETTING %08lx to %x\n", + DEBUG((" SETTING %p to %x\n", (&slot->pSG[i].ins), slot->SG[i].ins)); } diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h index e06bdfeab..f34c916b9 100644 --- a/drivers/scsi/53c700.h +++ b/drivers/scsi/53c700.h @@ -82,6 +82,7 @@ struct NCR_700_Device_Parameters { * cmnd[1], this could be in static storage */ unsigned char cmnd[MAX_COMMAND_SIZE]; __u8 depth; + struct scsi_cmnd *current_cmnd; /* currently active command */ }; @@ -423,23 +424,25 @@ struct NCR_700_Host_Parameters { #define script_patch_32(dev, script, symbol, value) \ { \ int i; \ + dma_addr_t da = value; \ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ - __u32 val =
bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \ + __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + da; \ (script)[A_##symbol##_used[i]] = bS_to_host(val); \ dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ - DEBUG((" script, patching %s at %d to 0x%lx\n", \ - #symbol, A_##symbol##_used[i], (value))); \ + DEBUG((" script, patching %s at %d to %pad\n", \ + #symbol, A_##symbol##_used[i], &da)); \ } \ } #define script_patch_32_abs(dev, script, symbol, value) \ { \ int i; \ + dma_addr_t da = value; \ for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \ - (script)[A_##symbol##_used[i]] = bS_to_host(value); \ + (script)[A_##symbol##_used[i]] = bS_to_host(da); \ dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \ - DEBUG((" script, patching %s at %d to 0x%lx\n", \ - #symbol, A_##symbol##_used[i], (value))); \ + DEBUG((" script, patching %s at %d to %pad\n", \ + #symbol, A_##symbol##_used[i], &da)); \ } \ } diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 98e5d51a3..7d1b4317e 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -838,6 +838,23 @@ config SCSI_IBMVSCSI To compile this driver as a module, choose M here: the module will be called ibmvscsi. +config SCSI_IBMVSCSIS + tristate "IBM Virtual SCSI Server support" + depends on PPC_PSERIES && TARGET_CORE && SCSI && PCI + help + This is the IBM POWER Virtual SCSI Target Server. + This driver uses the SRP protocol for communication between servers + guest and/or the host that run on the same server. + More information on VSCSI protocol can be found at www.power.org + + The userspace configuration needed to initialize the driver can be + found here: + + https://github.com/powervm/ibmvscsis/wiki/Configuration + + To compile this driver as a module, choose M here: the + module will be called ibmvscsis. + config SCSI_IBMVFC tristate "IBM Virtual FC support" depends on PPC_PSERIES && SCSI @@ -1433,7 +1450,7 @@ config SCSI_U14_34F_MAX_TAGS config SCSI_ULTRASTOR tristate "UltraStor SCSI support" - depends on X86 && ISA && SCSI + depends on X86 && ISA && SCSI && ISA_DMA_API ---help--- This is support for the UltraStor 14F, 24F and 34F SCSI-2 host adapter family.
This driver is explained in section 3.12 of the diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 862ab4efa..d5397987e 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -128,6 +128,7 @@ obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o obj-$(CONFIG_SCSI_NSP32) += nsp32.o obj-$(CONFIG_SCSI_IPR) += ipr.o obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ +obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi_tgt/ obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o obj-$(CONFIG_SCSI_STEX) += stex.o diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index c424e8bc2..5648b715f 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -644,15 +644,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) } } else { struct user_sgmap* usg; - usg = kmalloc(actual_fibsize - sizeof(struct aac_srb) - + sizeof(struct sgmap), GFP_KERNEL); + usg = kmemdup(upsg, + actual_fibsize - sizeof(struct aac_srb) + + sizeof(struct sgmap), GFP_KERNEL); if (!usg) { dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); rcode = -ENOMEM; goto cleanup; } - memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb) - + sizeof(struct sgmap)); actual_fibsize = actual_fibsize64; for (i = 0; i < usg->count; i++) { diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index a18819939..a5052dd8d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -57,7 +57,7 @@ static struct scsi_host_template bnx2fc_shost_template; static struct fc_function_template bnx2fc_transport_function; static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ; static struct fc_function_template bnx2fc_vport_xport_function; -static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); +static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode); static void __bnx2fc_destroy(struct bnx2fc_interface *interface); static int bnx2fc_destroy(struct net_device *net_device); static int bnx2fc_enable(struct net_device *netdev); @@ -486,7 +486,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, __skb_queue_tail(&bg->fcoe_rx_list, skb); if (bg->fcoe_rx_list.qlen == 1) - wake_up_process(bg->thread); + wake_up_process(bg->kthread); spin_unlock(&bg->fcoe_rx_list.lock); @@ -2260,7 +2260,7 @@ enum bnx2fc_create_link_state { * Returns: 0 for success */ static int _bnx2fc_create(struct net_device *netdev, - enum fip_state fip_mode, + enum fip_mode fip_mode, enum bnx2fc_create_link_state link_state) { struct fcoe_ctlr_device *cdev; @@ -2412,7 +2412,7 @@ mod_err: * * Returns: 0 for success */ -static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) +static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode) { return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP); } @@ -2715,7 +2715,7 @@ static int __init bnx2fc_mod_init(void) } wake_up_process(l2_thread); spin_lock_bh(&bg->fcoe_rx_list.lock); - bg->thread = l2_thread; + bg->kthread = l2_thread; spin_unlock_bh(&bg->fcoe_rx_list.lock); for_each_possible_cpu(cpu) { @@ -2788,8 +2788,8 @@ static void __exit bnx2fc_mod_exit(void) /* Destroy global thread */ bg = &bnx2fc_global; spin_lock_bh(&bg->fcoe_rx_list.lock); - l2_thread = bg->thread; - bg->thread = NULL; + l2_thread = bg->kthread; + bg->kthread = NULL; while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) kfree_skb(skb); diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c 
b/drivers/scsi/bnx2fc/bnx2fc_io.c index 026f394a3..8f24d60f0 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1758,7 +1758,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { /* Only for task management function */ io_req->fcp_rsp_code = rq_data[3]; - printk(KERN_ERR PFX "fcp_rsp_code = %d\n", + BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n", io_req->fcp_rsp_code); } diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index fb072cc5e..42921dbba 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -2417,7 +2417,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba, ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id); if (!ep) { printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending " - "offload request, unexpected complection\n"); + "offload request, unexpected completion\n"); return; } diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile index 86007e344..a73781ac1 100644 --- a/drivers/scsi/cxgbi/Makefile +++ b/drivers/scsi/cxgbi/Makefile @@ -1,2 +1,4 @@ +ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb + obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/ diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild index 961a12f6d..663c52e05 100644 --- a/drivers/scsi/cxgbi/cxgb3i/Kbuild +++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild @@ -1,3 +1,4 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3 +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig index e4603985d..f68c871b1 100644 --- a/drivers/scsi/cxgbi/cxgb3i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig @@ -5,6 +5,7 @@ config SCSI_CXGB3_ISCSI select ETHERNET select NET_VENDOR_CHELSIO select CHELSIO_T3 + select CHELSIO_LIB select SCSI_ISCSI_ATTRS ---help--- This driver supports iSCSI offload for the Chelsio T3 devices. 
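The libcxgbi rework in the hunks that follow drops the driver-private DDP tag arithmetic (cxgbi_is_ddp_tag(), cxgbi_set_non_ddp_tag(), and friends) in favor of the shared Chelsio page-pod manager (cxgbi_ppm), and encodes the iSCSI task index and session age as a plain 32-bit software tag via cxgbi_build_sw_tag() and cxgbi_decode_sw_tag(). As an illustration only, not part of the patch, here is a minimal standalone userspace sketch of that packing, assuming both fields fit in 15 bits as the patch's comment notes; build_sw_tag() and decode_sw_tag() are hypothetical stand-ins for the kernel helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors cxgbi_build_sw_tag(): idx in bits 16..30, age in bits 0..14 */
    static uint32_t build_sw_tag(uint32_t idx, uint32_t age)
    {
            return (idx << 16) | age;
    }

    /* mirrors cxgbi_decode_sw_tag(): recover both fields from the tag */
    static void decode_sw_tag(uint32_t sw_tag, uint32_t *idx, uint32_t *age)
    {
            *age = sw_tag & 0x7FFF;
            *idx = (sw_tag >> 16) & 0x7FFF;
    }

    int main(void)
    {
            uint32_t idx, age;

            decode_sw_tag(build_sw_tag(0x123, 0x5), &idx, &age);
            printf("idx 0x%x, age 0x%x\n", idx, age); /* idx 0x123, age 0x5 */
            return 0;
    }

The round trip in the patch works the same way: the sw tag is stashed as caller data when pagepods are reserved (cxgbi_ppm_ppods_reserve()) and recovered in cxgbi_parse_pdu_itt() before being split back into idx and age.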
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index e22a268fd..33e83464e 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -1028,7 +1028,7 @@ cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { * cxgb3i_ofld_init - allocate and initialize resources for each adapter found * @cdev: cxgbi adapter */ -int cxgb3i_ofld_init(struct cxgbi_device *cdev) +static int cxgb3i_ofld_init(struct cxgbi_device *cdev) { struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; struct adap_ports port; @@ -1076,64 +1076,69 @@ static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | V_ULPTX_CMD(ULP_MEM_WRITE)); - req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) | - V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1)); + req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) | + V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1)); } -static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, - unsigned int idx, unsigned int npods, - struct cxgbi_gather_list *gl) +static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) { - struct cxgbi_device *cdev = csk->cdev; - struct cxgbi_ddp_info *ddp = cdev->ddp; - unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; - int i; + return ((struct t3cdev *)cdev->lldev)->ulp_iscsi; +} - log_debug(1 << CXGBI_DBG_DDP, - "csk 0x%p, idx %u, npods %u, gl 0x%p.\n", - csk, idx, npods, gl); +static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo) +{ + unsigned int idx = ttinfo->idx; + unsigned int npods = ttinfo->npods; + struct scatterlist *sg = ttinfo->sgl; + struct cxgbi_pagepod *ppod; + struct ulp_mem_io *req; + unsigned int sg_off; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + int i; - for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { + for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) { struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + - PPOD_SIZE, 0, GFP_ATOMIC); + IPPOD_SIZE, 0, GFP_ATOMIC); if (!skb) return -ENOMEM; - ulp_mem_io_set_hdr(skb, pm_addr); - cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head + - sizeof(struct ulp_mem_io)), - hdr, gl, i * PPOD_PAGES_MAX); + req = (struct ulp_mem_io *)skb->head; + ppod = (struct cxgbi_pagepod *)(req + 1); + sg_off = i * PPOD_PAGES_MAX; + cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg, + &sg_off); skb->priority = CPL_PRIORITY_CONTROL; - cxgb3_ofld_send(cdev->lldev, skb); + cxgb3_ofld_send(ppm->lldev, skb); } return 0; } -static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, - unsigned int idx, unsigned int npods) +static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm, + struct cxgbi_task_tag_info *ttinfo) { - struct cxgbi_device *cdev = chba->cdev; - struct cxgbi_ddp_info *ddp = cdev->ddp; - unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; + unsigned int idx = ttinfo->idx; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + unsigned int npods = ttinfo->npods; int i; log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n", - cdev, idx, npods, tag); + "cdev 0x%p, clear idx %u, npods %u.\n", + cdev, idx, npods); - for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { + for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) { struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + - PPOD_SIZE, 0, GFP_ATOMIC); + IPPOD_SIZE, 0, 
GFP_ATOMIC); if (!skb) { - pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n", - tag, idx, i, npods); + pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n", + cdev, idx, i, npods); continue; } ulp_mem_io_set_hdr(skb, pm_addr); skb->priority = CPL_PRIORITY_CONTROL; - cxgb3_ofld_send(cdev->lldev, skb); + cxgb3_ofld_send(ppm->lldev, skb); } } @@ -1203,82 +1208,68 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, } /** - * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource - * @cdev: cxgb3i adapter - * release all the resource held by the ddp pagepod manager for a given - * adapter if needed - */ - -static void t3_ddp_cleanup(struct cxgbi_device *cdev) -{ - struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; - - if (cxgbi_ddp_cleanup(cdev)) { - pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev); - tdev->ulp_iscsi = NULL; - } -} - -/** - * ddp_init - initialize the cxgb3 adapter's ddp resource + * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource * @cdev: cxgb3i adapter * initialize the ddp pagepod manager for a given adapter */ static int cxgb3i_ddp_init(struct cxgbi_device *cdev) { struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; - struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi; + struct net_device *ndev = cdev->ports[0]; + struct cxgbi_tag_format tformat; + unsigned int ppmax, tagmask = 0; struct ulp_iscsi_info uinfo; - unsigned int pgsz_factor[4]; int i, err; - if (ddp) { - kref_get(&ddp->refcnt); - pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n", - tdev, tdev->ulp_iscsi); - cdev->ddp = ddp; - return -EALREADY; - } - err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo); if (err < 0) { - pr_err("%s, failed to get iscsi param err=%d.\n", - tdev->name, err); + pr_err("%s, failed to get iscsi param %d.\n", + ndev->name, err); return err; } + if (uinfo.llimit >= uinfo.ulimit) { + pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n", + ndev->name, uinfo.llimit, uinfo.ulimit); + return -EACCES; + } - err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit, - uinfo.max_txsz, uinfo.max_rxsz); - if (err < 0) - return err; + ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT; + tagmask = cxgbi_tagmask_set(ppmax); - ddp = cdev->ddp; + pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n", + ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask, + tagmask); - uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; - cxgbi_ddp_page_size_factor(pgsz_factor); + memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); for (i = 0; i < 4; i++) - uinfo.pgsz_factor[i] = pgsz_factor[i]; - uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT); + tformat.pgsz_order[i] = uinfo.pgsz_factor[i]; + cxgbi_tagmask_check(tagmask, &tformat); - err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); - if (err < 0) { - pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n", - tdev->name, err); - cxgbi_ddp_cleanup(cdev); - return err; + cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat, ppmax, + uinfo.llimit, uinfo.llimit, 0); + if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) { + uinfo.tagmask = tagmask; + uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT); + + err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); + if (err < 0) { + pr_err("T3 %s fail to set iscsi param %d.\n", + ndev->name, err); + cdev->flags |= CXGBI_FLAG_DDP_OFF; + } + err = 0; } - tdev->ulp_iscsi = ddp; cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; - cdev->csk_ddp_set = ddp_set_map; - cdev->csk_ddp_clear = ddp_clear_map; - - pr_info("tdev 
0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " - "%u/%u.\n", - tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, - ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz, - ddp->max_rxsz, uinfo.max_rxsz); + cdev->csk_ddp_set_map = ddp_set_map; + cdev->csk_ddp_clear_map = ddp_clear_map; + cdev->cdev2ppm = cdev2ppm; + cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); + return 0; } @@ -1325,7 +1316,6 @@ static void cxgb3i_dev_open(struct t3cdev *t3dev) cdev->rx_credit_thres = cxgb3i_rx_credit_thres; cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN; cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss); - cdev->dev_ddp_cleanup = t3_ddp_cleanup; cdev->itp = &cxgb3i_iscsi_transport; err = cxgb3i_ddp_init(cdev); diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild index 374586437..38e03c280 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kbuild +++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild @@ -1,3 +1,4 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 +ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig index 8c4e42303..594f593c8 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig @@ -5,6 +5,7 @@ config SCSI_CXGB4_ISCSI select ETHERNET select NET_VENDOR_CHELSIO select CHELSIO_T4 + select CHELSIO_LIB select SCSI_ISCSI_ATTRS ---help--- This driver supports iSCSI offload for the Chelsio T4 devices. diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 339f6b7f4..e4ba2d261 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1503,7 +1503,7 @@ rel_resource_without_clip: return -EINVAL; } -cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { +static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = do_act_establish, [CPL_ACT_OPEN_RPL] = do_act_open_rpl, [CPL_PEER_CLOSE] = do_peer_close, @@ -1519,7 +1519,7 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_RX_DATA] = do_rx_data, }; -int cxgb4i_ofld_init(struct cxgbi_device *cdev) +static int cxgb4i_ofld_init(struct cxgbi_device *cdev) { int rc; @@ -1543,24 +1543,22 @@ int cxgb4i_ofld_init(struct cxgbi_device *cdev) return 0; } -/* - * functions to program the pagepod in h/w - */ -#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ -static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi, - struct ulp_mem_io *req, - unsigned int wr_len, unsigned int dlen, - unsigned int pm_addr) +static inline void +ulp_mem_io_set_hdr(struct cxgbi_device *cdev, + struct ulp_mem_io *req, + unsigned int wr_len, unsigned int dlen, + unsigned int pm_addr, + int tid) { + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1); - INIT_ULPTX_WR(req, wr_len, 0, 0); - if (is_t4(lldi->adapter_type)) - req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | - (ULP_MEMIO_ORDER_F)); - else - req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | - (T5_ULP_MEMIO_IMM_F)); + INIT_ULPTX_WR(req, wr_len, 0, tid); + req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | + FW_WR_ATOMIC_V(0)); + req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | + ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) | + T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type))); req->dlen = 
htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); @@ -1569,82 +1567,89 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi, idata->len = htonl(dlen); } -static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id, - struct cxgbi_pagepod_hdr *hdr, unsigned int idx, - unsigned int npods, - struct cxgbi_gather_list *gl, - unsigned int gl_pidx) +static struct sk_buff * +ddp_ppod_init_idata(struct cxgbi_device *cdev, + struct cxgbi_ppm *ppm, + unsigned int idx, unsigned int npods, + unsigned int tid) { - struct cxgbi_ddp_info *ddp = cdev->ddp; - struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); - struct sk_buff *skb; - struct ulp_mem_io *req; - struct ulptx_idata *idata; - struct cxgbi_pagepod *ppod; - unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit; - unsigned int dlen = PPOD_SIZE * npods; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit; + unsigned int dlen = npods << PPOD_SIZE_SHIFT; unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_idata) + dlen, 16); - unsigned int i; + struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC); - skb = alloc_wr(wr_len, 0, GFP_ATOMIC); if (!skb) { - pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n", - cdev, idx, npods); - return -ENOMEM; + pr_err("%s: %s idx %u, npods %u, OOM.\n", + __func__, ppm->ndev->name, idx, npods); + return NULL; } - req = (struct ulp_mem_io *)skb->head; - set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); - ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr); + ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen, + pm_addr, tid); + + return skb; +} + +static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo, + unsigned int idx, unsigned int npods, + struct scatterlist **sg_pp, + unsigned int *sg_off) +{ + struct cxgbi_device *cdev = csk->cdev; + struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods, + csk->tid); + struct ulp_mem_io *req; + struct ulptx_idata *idata; + struct cxgbi_pagepod *ppod; + int i; + + if (!skb) + return -ENOMEM; + + req = (struct ulp_mem_io *)skb->head; idata = (struct ulptx_idata *)(req + 1); ppod = (struct cxgbi_pagepod *)(idata + 1); - for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) { - if (!hdr && !gl) - cxgbi_ddp_ppod_clear(ppod); - else - cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx); - } + for (i = 0; i < npods; i++, ppod++) + cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off); + + cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE); + cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL); + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + + spin_lock_bh(&csk->lock); + cxgbi_sock_skb_entail(csk, skb); + spin_unlock_bh(&csk->lock); - cxgb4_ofld_send(cdev->ports[port_id], skb); return 0; } -static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, - unsigned int idx, unsigned int npods, - struct cxgbi_gather_list *gl) +static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, + struct cxgbi_task_tag_info *ttinfo) { + unsigned int pidx = ttinfo->idx; + unsigned int npods = ttinfo->npods; unsigned int i, cnt; int err = 0; + struct scatterlist *sg = ttinfo->sgl; + unsigned int offset = 0; - for (i = 0; i < npods; i += cnt, idx += cnt) { - cnt = npods - i; - if (cnt > ULPMEM_IDATA_MAX_NPPODS) - cnt = ULPMEM_IDATA_MAX_NPPODS; - err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr, - idx, cnt, gl, 4 * i); - if 
(err < 0) - break; - } - return err; -} - -static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, - unsigned int idx, unsigned int npods) -{ - unsigned int i, cnt; - int err; + ttinfo->cid = csk->port_id; - for (i = 0; i < npods; i += cnt, idx += cnt) { + for (i = 0; i < npods; i += cnt, pidx += cnt) { cnt = npods - i; + if (cnt > ULPMEM_IDATA_MAX_NPPODS) cnt = ULPMEM_IDATA_MAX_NPPODS; - err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL, - idx, cnt, NULL, 0); + err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, + &sg, &offset); if (err < 0) break; } + + return err; } static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, @@ -1710,48 +1715,46 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, return 0; } +static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) +{ + return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *) + (cxgbi_cdev_priv(cdev)))->iscsi_ppm); +} + static int cxgb4i_ddp_init(struct cxgbi_device *cdev) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); - struct cxgbi_ddp_info *ddp = cdev->ddp; - unsigned int tagmask, pgsz_factor[4]; - int err; - - if (ddp) { - kref_get(&ddp->refcnt); - pr_warn("cdev 0x%p, ddp 0x%p already set up.\n", - cdev, cdev->ddp); - return -EALREADY; + struct net_device *ndev = cdev->ports[0]; + struct cxgbi_tag_format tformat; + unsigned int ppmax; + int i; + + if (!lldi->vr->iscsi.size) { + pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name); + return -EACCES; } - err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start, - lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1, - lldi->iscsi_iolen, lldi->iscsi_iolen); - if (err < 0) - return err; + cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ; + ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT; - ddp = cdev->ddp; + memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); + for (i = 0; i < 4; i++) + tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3)) + & 0xF; + cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat); - tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; - cxgbi_ddp_page_size_factor(pgsz_factor); - cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor); + cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax, + lldi->iscsi_llimit, lldi->vr->iscsi.start, 2); cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; - cdev->csk_ddp_set = ddp_set_map; - cdev->csk_ddp_clear = ddp_clear_map; - - pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n", - cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits, - cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask); - pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " - " %u/%u.\n", - cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, - ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen, - ddp->max_rxsz, lldi->iscsi_iolen); - pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n", - cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size, - ddp->max_rxsz); + cdev->csk_ddp_set_map = ddp_set_map; + cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->cdev2ppm = cdev2ppm; + return 0; } diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index ead83a24b..d1421139e 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -64,6 +64,14 @@ static DEFINE_MUTEX(cdev_mutex); static 
LIST_HEAD(cdev_rcu_list); static DEFINE_SPINLOCK(cdev_rcu_lock); +static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age) +{ + if (age) + *age = sw_tag & 0x7FFF; + if (idx) + *idx = (sw_tag >> 16) & 0x7FFF; +} + int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, unsigned int max_conn) { @@ -113,12 +121,7 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) "cdev 0x%p, p# %u.\n", cdev, cdev->nports); cxgbi_hbas_remove(cdev); cxgbi_device_portmap_cleanup(cdev); - if (cdev->dev_ddp_cleanup) - cdev->dev_ddp_cleanup(cdev); - else - cxgbi_ddp_cleanup(cdev); - if (cdev->ddp) - cxgbi_ddp_cleanup(cdev); + cxgbi_ppm_release(cdev->cdev2ppm(cdev)); if (cdev->pmap.max_connect) cxgbi_free_big_mem(cdev->pmap.port_csk); kfree(cdev); @@ -1182,525 +1185,245 @@ out_err: goto done; } -/* - * Direct Data Placement - - * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted - * final destination host-memory buffers based on the Initiator Task Tag (ITT) - * in Data-In or Target Task Tag (TTT) in Data-Out PDUs. - * The host memory address is programmed into h/w in the format of pagepod - * entries. - * The location of the pagepod entry is encoded into ddp tag which is used as - * the base for ITT/TTT. - */ - -static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; -static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; -static unsigned char page_idx = DDP_PGIDX_MAX; - -static unsigned char sw_tag_idx_bits; -static unsigned char sw_tag_age_bits; - -/* - * Direct-Data Placement page size adjustment - */ -static int ddp_adjust_page_table(void) +static inline void +scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl, + unsigned int *sgcnt, unsigned int *dlen, + unsigned int prot) { - int i; - unsigned int base_order, order; - - if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { - pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n", - PAGE_SIZE, 1UL << ddp_page_shift[0]); - return -EINVAL; - } - - base_order = get_order(1UL << ddp_page_shift[0]); - order = get_order(1UL << PAGE_SHIFT); + struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc); - for (i = 0; i < DDP_PGIDX_MAX; i++) { - /* first is the kernel page size, then just doubling */ - ddp_page_order[i] = order - base_order + i; - ddp_page_shift[i] = PAGE_SHIFT + i; - } - return 0; + *sgl = sdb->table.sgl; + *sgcnt = sdb->table.nents; + *dlen = sdb->length; + /* Caution: for protection sdb, sdb->length is invalid */ } -static int ddp_find_page_index(unsigned long pgsz) +void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod, + struct cxgbi_task_tag_info *ttinfo, + struct scatterlist **sg_pp, unsigned int *sg_off) { + struct scatterlist *sg = sg_pp ? *sg_pp : NULL; + unsigned int offset = sg_off ? 
*sg_off : 0; + dma_addr_t addr = 0UL; + unsigned int len = 0; int i; - for (i = 0; i < DDP_PGIDX_MAX; i++) { - if (pgsz == (1UL << ddp_page_shift[i])) - return i; - } - pr_info("ddp page size %lu not supported.\n", pgsz); - return DDP_PGIDX_MAX; -} + memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); -static void ddp_setup_host_page_size(void) -{ - if (page_idx == DDP_PGIDX_MAX) { - page_idx = ddp_find_page_index(PAGE_SIZE); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } - if (page_idx == DDP_PGIDX_MAX) { - pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE); - if (ddp_adjust_page_table() < 0) { - pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE); - return; + for (i = 0; i < PPOD_PAGES_MAX; i++) { + if (sg) { + ppod->addr[i] = cpu_to_be64(addr + offset); + offset += PAGE_SIZE; + if (offset == (len + sg->offset)) { + offset = 0; + sg = sg_next(sg); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + } } - page_idx = ddp_find_page_index(PAGE_SIZE); + } else { + ppod->addr[i] = 0ULL; } - pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx); } -} - -void cxgbi_ddp_page_size_factor(int *pgsz_factor) -{ - int i; - - for (i = 0; i < DDP_PGIDX_MAX; i++) - pgsz_factor[i] = ddp_page_order[i]; -} -EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor); - -/* - * DDP setup & teardown - */ - -void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod, - struct cxgbi_pagepod_hdr *hdr, - struct cxgbi_gather_list *gl, unsigned int gidx) -{ - int i; - - memcpy(ppod, hdr, sizeof(*hdr)); - for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) { - ppod->addr[i] = gidx < gl->nelem ? - cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; - } -} -EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set); - -void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod) -{ - memset(ppod, 0, sizeof(*ppod)); -} -EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear); - -static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp, - unsigned int start, unsigned int max, - unsigned int count, - struct cxgbi_gather_list *gl) -{ - unsigned int i, j, k; - /* not enough entries */ - if ((max - start) < count) { - log_debug(1 << CXGBI_DBG_DDP, - "NOT enough entries %u+%u < %u.\n", start, count, max); - return -EBUSY; + /* + * the fifth address needs to be repeated in the next ppod, so do + * not move sg + */ + if (sg_pp) { + *sg_pp = sg; + *sg_off = offset; } - max -= count; - spin_lock(&ddp->map_lock); - for (i = start; i < max;) { - for (j = 0, k = i; j < count; j++, k++) { - if (ddp->gl_map[k]) - break; - } - if (j == count) { - for (j = 0, k = i; j < count; j++, k++) - ddp->gl_map[k] = gl; - spin_unlock(&ddp->map_lock); - return i; + if (offset == len) { + offset = 0; + sg = sg_next(sg); + if (sg) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); } - i += j + 1; } - spin_unlock(&ddp->map_lock); - log_debug(1 << CXGBI_DBG_DDP, - "NO suitable entries %u available.\n", count); - return -EBUSY; -} - -static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp, - int start, int count) -{ - spin_lock(&ddp->map_lock); - memset(&ddp->gl_map[start], 0, - count * sizeof(struct cxgbi_gather_list *)); - spin_unlock(&ddp->map_lock); + ppod->addr[i] = sg ? 
cpu_to_be64(addr + offset) : 0ULL; } +EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod); -static inline void ddp_gl_unmap(struct pci_dev *pdev, - struct cxgbi_gather_list *gl) -{ - int i; +/* + * APIs interacting with open-iscsi libraries + */ - for (i = 0; i < gl->nelem; i++) - dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, - PCI_DMA_FROMDEVICE); -} +static unsigned char padding[4]; -static inline int ddp_gl_map(struct pci_dev *pdev, - struct cxgbi_gather_list *gl) +void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev, + struct cxgbi_tag_format *tformat, unsigned int ppmax, + unsigned int llimit, unsigned int start, + unsigned int rsvd_factor) { - int i; + int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev, + cdev->lldev, tformat, ppmax, llimit, start, + rsvd_factor); - for (i = 0; i < gl->nelem; i++) { - gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0, - PAGE_SIZE, - PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) { - log_debug(1 << CXGBI_DBG_DDP, - "page %d 0x%p, 0x%p dma mapping err.\n", - i, gl->pages[i], pdev); - goto unmap; - } - } - return i; -unmap: - if (i) { - unsigned int nelem = gl->nelem; + if (err >= 0) { + struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); - gl->nelem = i; - ddp_gl_unmap(pdev, gl); - gl->nelem = nelem; + if (ppm->ppmax < 1024 || + ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) + cdev->flags |= CXGBI_FLAG_DDP_OFF; + err = 0; + } else { + cdev->flags |= CXGBI_FLAG_DDP_OFF; } - return -EINVAL; -} - -static void ddp_release_gl(struct cxgbi_gather_list *gl, - struct pci_dev *pdev) -{ - ddp_gl_unmap(pdev, gl); - kfree(gl); } +EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup); -static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen, - struct scatterlist *sgl, - unsigned int sgcnt, - struct pci_dev *pdev, - gfp_t gfp) +static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents) { - struct cxgbi_gather_list *gl; + int i; + int last_sgidx = nents - 1; struct scatterlist *sg = sgl; - struct page *sgpage = sg_page(sg); - unsigned int sglen = sg->length; - unsigned int sgoffset = sg->offset; - unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> - PAGE_SHIFT; - int i = 1, j = 0; - - if (xferlen < DDP_THRESHOLD) { - log_debug(1 << CXGBI_DBG_DDP, - "xfer %u < threshold %u, no ddp.\n", - xferlen, DDP_THRESHOLD); - return NULL; - } - - gl = kzalloc(sizeof(struct cxgbi_gather_list) + - npages * (sizeof(dma_addr_t) + - sizeof(struct page *)), gfp); - if (!gl) { - log_debug(1 << CXGBI_DBG_DDP, - "xfer %u, %u pages, OOM.\n", xferlen, npages); - return NULL; - } - log_debug(1 << CXGBI_DBG_DDP, - "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); - - gl->pages = (struct page **)&gl->phys_addr[npages]; - gl->nelem = npages; - gl->length = xferlen; - gl->offset = sgoffset; - gl->pages[0] = sgpage; - - for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt; - i++, sg = sg_next(sg)) { - struct page *page = sg_page(sg); - - if (sgpage == page && sg->offset == sgoffset + sglen) - sglen += sg->length; - else { - /* make sure the sgl is fit for ddp: - * each has the same page size, and - * all of the middle pages are used completely - */ - if ((j && sgoffset) || ((i != sgcnt - 1) && - ((sglen + sgoffset) & ~PAGE_MASK))) { - log_debug(1 << CXGBI_DBG_DDP, - "page %d/%u, %u + %u.\n", - i, sgcnt, sgoffset, sglen); - goto error_out; - } + for (i = 0; i < nents; i++, sg = sg_next(sg)) { + unsigned int len = sg->length + sg->offset; - j++; - if (j == gl->nelem || sg->offset) { - log_debug(1 << 
CXGBI_DBG_DDP, - "page %d/%u, offset %u.\n", - j, gl->nelem, sg->offset); - goto error_out; - } - gl->pages[j] = page; - sglen = sg->length; - sgoffset = sg->offset; - sgpage = page; - } - } - gl->nelem = ++j; - - if (ddp_gl_map(pdev, gl) < 0) - goto error_out; - - return gl; - -error_out: - kfree(gl); - return NULL; -} - -static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag) -{ - struct cxgbi_device *cdev = chba->cdev; - struct cxgbi_ddp_info *ddp = cdev->ddp; - u32 idx; - - idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask; - if (idx < ddp->nppods) { - struct cxgbi_gather_list *gl = ddp->gl_map[idx]; - unsigned int npods; - - if (!gl || !gl->nelem) { - pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n", - tag, idx, gl, gl ? gl->nelem : 0); - return; - } - npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; - log_debug(1 << CXGBI_DBG_DDP, - "tag 0x%x, release idx %u, npods %u.\n", - tag, idx, npods); - cdev->csk_ddp_clear(chba, tag, idx, npods); - ddp_unmark_entries(ddp, idx, npods); - ddp_release_gl(gl, ddp->pdev); - } else - pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods); -} - -static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, - u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl, - gfp_t gfp) -{ - struct cxgbi_device *cdev = csk->cdev; - struct cxgbi_ddp_info *ddp = cdev->ddp; - struct cxgbi_tag_format *tformat = &cdev->tag_format; - struct cxgbi_pagepod_hdr hdr; - unsigned int npods; - int idx = -1; - int err = -ENOMEM; - u32 tag; - - npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; - if (ddp->idx_last == ddp->nppods) - idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, - npods, gl); - else { - idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, - ddp->nppods, npods, - gl); - if (idx < 0 && ddp->idx_last >= npods) { - idx = ddp_find_unused_entries(ddp, 0, - min(ddp->idx_last + npods, ddp->nppods), - npods, gl); + if ((sg->offset & 0x3) || (i && sg->offset) || + ((i != last_sgidx) && len != PAGE_SIZE)) { + log_debug(1 << CXGBI_DBG_DDP, + "sg %u/%u, %u,%u, not aligned.\n", + i, nents, sg->offset, sg->length); + goto err_out; } } - if (idx < 0) { - log_debug(1 << CXGBI_DBG_DDP, - "xferlen %u, gl %u, npods %u NO DDP.\n", - gl->length, gl->nelem, npods); - return idx; - } - - tag = cxgbi_ddp_tag_base(tformat, sw_tag); - tag |= idx << PPOD_IDX_SHIFT; - - hdr.rsvd = 0; - hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid)); - hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); - hdr.max_offset = htonl(gl->length); - hdr.page_offset = htonl(gl->offset); - - err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); - if (err < 0) - goto unmark_entries; - - ddp->idx_last = idx; - log_debug(1 << CXGBI_DBG_DDP, - "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n", - gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx, - npods); - *tagp = tag; return 0; - -unmark_entries: - ddp_unmark_entries(ddp, idx, npods); - return err; +err_out: + return -EINVAL; } -int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, - unsigned int sw_tag, unsigned int xferlen, - struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp) +static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn, + struct cxgbi_task_data *tdata, u32 sw_tag, + unsigned int xferlen) { + struct cxgbi_sock *csk = cconn->cep->csk; struct cxgbi_device *cdev = csk->cdev; - struct cxgbi_tag_format *tformat = &cdev->tag_format; - struct cxgbi_gather_list *gl; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + struct scatterlist *sgl 
= ttinfo->sgl; + unsigned int sgcnt = ttinfo->nents; + unsigned int sg_offset = sgl->offset; int err; - if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp || - xferlen < DDP_THRESHOLD) { + if (cdev->flags & CXGBI_FLAG_DDP_OFF) { log_debug(1 << CXGBI_DBG_DDP, - "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen); + "cdev 0x%p DDP off.\n", cdev); return -EINVAL; } - if (!cxgbi_sw_tag_usable(tformat, sw_tag)) { + if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt || + ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) { log_debug(1 << CXGBI_DBG_DDP, - "sw_tag 0x%x NOT usable.\n", sw_tag); + "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", + ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX, + xferlen, ttinfo->nents); return -EINVAL; } - gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp); - if (!gl) - return -ENOMEM; + /* make sure the buffer is suitable for ddp */ + if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0) + return -EINVAL; - err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); - if (err < 0) - ddp_release_gl(gl, cdev->pdev); + ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >> + PAGE_SHIFT; - return err; -} + /* + * the ddp tag will be used for the itt in the outgoing pdu, + * the itt generated by libiscsi is saved in the ppm and can be + * retrieved via the ddp tag + */ + err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, + &ttinfo->tag, (unsigned long)sw_tag); + if (err < 0) { + cconn->ddp_full++; + return err; + } + ttinfo->npods = err; -static void ddp_destroy(struct kref *kref) -{ - struct cxgbi_ddp_info *ddp = container_of(kref, - struct cxgbi_ddp_info, - refcnt); - struct cxgbi_device *cdev = ddp->cdev; - int i = 0; + /* setup dma from scsi command sgl */ + sgl->offset = 0; + err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); + sgl->offset = sg_offset; + if (err == 0) { + pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", + __func__, sw_tag, xferlen, sgcnt); + goto rel_ppods; + } + if (err != ttinfo->nr_pages) { + log_debug(1 << CXGBI_DBG_DDP, + "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n", + __func__, sw_tag, xferlen, sgcnt, err); + } - pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev); + ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED; + ttinfo->cid = csk->port_id; - while (i < ddp->nppods) { - struct cxgbi_gather_list *gl = ddp->gl_map[i]; + cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, + xferlen, &ttinfo->hdr); - if (gl) { - int npods = (gl->nelem + PPOD_PAGES_MAX - 1) - >> PPOD_PAGES_SHIFT; - pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods); - kfree(gl); - i += npods; - } else - i++; + if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) { + /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */ + ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID; + } else { + /* write ppod from control queue now */ + err = cdev->csk_ddp_set_map(ppm, csk, ttinfo); + if (err < 0) + goto rel_ppods; } - cxgbi_free_big_mem(ddp); -} - -int cxgbi_ddp_cleanup(struct cxgbi_device *cdev) -{ - struct cxgbi_ddp_info *ddp = cdev->ddp; - log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp); - cdev->ddp = NULL; - if (ddp) - return kref_put(&ddp->refcnt, ddp_destroy); return 0; -} -EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup); -int cxgbi_ddp_init(struct cxgbi_device *cdev, - unsigned int llimit, unsigned int ulimit, - unsigned int max_txsz, unsigned int max_rxsz) -{ - struct cxgbi_ddp_info *ddp; - unsigned int ppmax, bits; +rel_ppods: + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); - ppmax = (ulimit - llimit + 1) >>
PPOD_SIZE_SHIFT; - bits = __ilog2_u32(ppmax) + 1; - if (bits > PPOD_IDX_MAX_SIZE) - bits = PPOD_IDX_MAX_SIZE; - ppmax = (1 << (bits - 1)) - 1; - - ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) + - ppmax * (sizeof(struct cxgbi_gather_list *) + - sizeof(struct sk_buff *)), - GFP_KERNEL); - if (!ddp) { - pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax); - return -ENOMEM; + if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) { + ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED; + dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); } - ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1); - cdev->ddp = ddp; - - spin_lock_init(&ddp->map_lock); - kref_init(&ddp->refcnt); - - ddp->cdev = cdev; - ddp->pdev = cdev->pdev; - ddp->llimit = llimit; - ddp->ulimit = ulimit; - ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE); - ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE); - ddp->nppods = ppmax; - ddp->idx_last = ppmax; - ddp->idx_bits = bits; - ddp->idx_mask = (1 << bits) - 1; - ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1; - - cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; - cdev->tag_format.rsvd_bits = ddp->idx_bits; - cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT; - cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1; - - pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n", - cdev->ports[0]->name, cdev->tag_format.sw_bits, - cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift, - cdev->tag_format.rsvd_mask); - - cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, - ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); - cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, - ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); - - log_debug(1 << CXGBI_DBG_DDP, - "%s max payload size: %u/%u, %u/%u.\n", - cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz, - cdev->rx_max_size, ddp->max_rxsz); - return 0; + return -EINVAL; } -EXPORT_SYMBOL_GPL(cxgbi_ddp_init); - -/* - * APIs interacting with open-iscsi libraries - */ - -static unsigned char padding[4]; static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) { struct scsi_cmnd *sc = task->sc; struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; - struct cxgbi_hba *chba = cconn->chba; - struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; + struct cxgbi_device *cdev = cconn->chba->cdev; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); u32 tag = ntohl((__force u32)hdr_itt); log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag); + "cdev 0x%p, task 0x%p, release tag 0x%x.\n", + cdev, task, tag); if (sc && (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && - cxgbi_is_ddp_tag(tformat, tag)) - ddp_tag_release(chba, tag); + cxgbi_ppm_is_ddp_tag(ppm, tag)) { + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + + if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ)) + cdev->csk_ddp_clear_map(cdev, ppm, ttinfo); + cxgbi_ppm_ppod_release(ppm, ttinfo->idx); + dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, + DMA_FROM_DEVICE); + } +} + +static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age) +{ + /* assume idx and age both are < 0x7FFF (32767) */ + return (idx << 16) | age; } static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) @@ -1710,34 +1433,41 @@ static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) struct iscsi_session *sess = conn->session; struct 
iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; - struct cxgbi_hba *chba = cconn->chba; - struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; - u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; + struct cxgbi_device *cdev = cconn->chba->cdev; + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age); u32 tag = 0; int err = -EINVAL; if (sc && - (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) { - err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, - scsi_in(sc)->length, - scsi_in(sc)->table.sgl, - scsi_in(sc)->table.nents, - GFP_ATOMIC); - if (err < 0) - log_debug(1 << CXGBI_DBG_DDP, - "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", - cconn->cep->csk, task, scsi_in(sc)->length, - scsi_in(sc)->table.nents); + (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) + ) { + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; + + scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents, + &tdata->dlen, 0); + err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen); + if (!err) + tag = ttinfo->tag; + else + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", + cconn->cep->csk, task, tdata->dlen, + ttinfo->nents); } - if (err < 0) - tag = cxgbi_set_non_ddp_tag(tformat, sw_tag); + if (err < 0) { + err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag); + if (err < 0) + return err; + } /* the itt need to sent in big-endian order */ *hdr_itt = (__force itt_t)htonl(tag); log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", - chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); + "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", + cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); return 0; } @@ -1746,19 +1476,24 @@ void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_device *cdev = cconn->chba->cdev; - u32 tag = ntohl((__force u32) itt); + struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); + u32 tag = ntohl((__force u32)itt); u32 sw_bits; - sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag); - if (idx) - *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); - if (age) - *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; + if (ppm) { + if (cxgbi_ppm_is_ddp_tag(ppm, tag)) + sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag); + else + sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag); + } else { + sw_bits = tag; + } + cxgbi_decode_sw_tag(sw_bits, idx, age); log_debug(1 << CXGBI_DBG_DDP, - "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", - cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, - age ? *age : 0xFF); + "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", + cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, + age ? 
*age : 0xFF); } EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); @@ -2260,7 +1995,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); + struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; struct sk_buff *skb = tdata->skb; + struct cxgbi_sock *csk = NULL; unsigned int datalen; int err; @@ -2270,8 +2007,28 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) return 0; } + if (cconn && cconn->cep) + csk = cconn->cep->csk; + if (!csk) { + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p, csk gone.\n", task); + return -EPIPE; + } + datalen = skb->data_len; tdata->skb = NULL; + + /* write ppod first if using ofldq to write ppod */ + if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { + struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev); + + ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID; + if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0) + pr_err("task 0x%p, ppod writing using ofldq failed.\n", + task); + /* continue. Let fl get the data */ + } + err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); if (err > 0) { int pdulen = err; @@ -2313,12 +2070,14 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); void cxgbi_cleanup_task(struct iscsi_task *task) { + struct iscsi_tcp_task *tcp_task = task->dd_data; struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); log_debug(1 << CXGBI_DBG_ISCSI, "task 0x%p, skb 0x%p, itt 0x%x.\n", task, tdata->skb, task->hdr_itt); + tcp_task->dd_data = NULL; /* never reached the xmit task callout */ if (tdata->skb) __kfree_skb(tdata->skb); @@ -2528,6 +2287,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_ppm *ppm; struct iscsi_endpoint *ep; struct cxgbi_endpoint *cep; struct cxgbi_sock *csk; @@ -2540,7 +2300,10 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, /* setup ddp pagesize */ cep = ep->dd_data; csk = cep->csk; - err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0); + + ppm = csk->cdev->cdev2ppm(csk->cdev); + err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, + ppm->tformat.pgsz_idx_dflt, 0); if (err < 0) return err; @@ -2915,16 +2678,7 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); static int __init libcxgbi_init_module(void) { - sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; - sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; - pr_info("%s", version); - - pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", - ISCSI_ITT_MASK, sw_tag_idx_bits, - ISCSI_AGE_MASK, sw_tag_age_bits); - - ddp_setup_host_page_size(); return 0; } diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 9842301f7..e7802738f 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -24,9 +24,12 @@ #include #include #include +#include #include #include +#include + enum cxgbi_dbg_flag { CXGBI_DBG_ISCSI, CXGBI_DBG_DDP, @@ -84,92 +87,11 @@ static inline unsigned int cxgbi_ulp_extra_len(int submode) return ulp2_extra_len[submode & 3]; } -/* - * struct pagepod_hdr, pagepod - pagepod format - */ - #define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ #define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ #define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ #define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ -struct cxgbi_pagepod_hdr { - u32 vld_tid; - u32 pgsz_tag_clr; 
- u32 max_offset; - u32 page_offset; - u64 rsvd; -}; - -#define PPOD_PAGES_MAX 4 -struct cxgbi_pagepod { - struct cxgbi_pagepod_hdr hdr; - u64 addr[PPOD_PAGES_MAX + 1]; -}; - -struct cxgbi_tag_format { - unsigned char sw_bits; - unsigned char rsvd_bits; - unsigned char rsvd_shift; - unsigned char filler[1]; - u32 rsvd_mask; -}; - -struct cxgbi_gather_list { - unsigned int tag; - unsigned int length; - unsigned int offset; - unsigned int nelem; - struct page **pages; - dma_addr_t phys_addr[0]; -}; - -struct cxgbi_ddp_info { - struct kref refcnt; - struct cxgbi_device *cdev; - struct pci_dev *pdev; - unsigned int max_txsz; - unsigned int max_rxsz; - unsigned int llimit; - unsigned int ulimit; - unsigned int nppods; - unsigned int idx_last; - unsigned char idx_bits; - unsigned char filler[3]; - unsigned int idx_mask; - unsigned int rsvd_tag_mask; - spinlock_t map_lock; - struct cxgbi_gather_list **gl_map; -}; - -#define DDP_PGIDX_MAX 4 -#define DDP_THRESHOLD 2048 - -#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ - -#define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */ -#define PPOD_SIZE_SHIFT 6 - -#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ -#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ -#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ - -#define PPOD_COLOR_SHIFT 0 -#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT) - -#define PPOD_IDX_SHIFT 6 -#define PPOD_IDX_MAX_SIZE 24 - -#define PPOD_TID_SHIFT 0 -#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT) - -#define PPOD_TAG_SHIFT 6 -#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT) - -#define PPOD_VALID_SHIFT 24 -#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) -#define PPOD_VALID_FLAG PPOD_VALID(1U) - /* * sge_opaque_hdr - * Opaque version of structure the SGE stores at skb->head of TX_DATA packets @@ -279,6 +201,8 @@ struct cxgbi_skb_tx_cb { enum cxgbi_skcb_flags { SKCBF_TX_NEED_HDR, /* packet needs a header */ + SKCBF_TX_MEM_WRITE, /* memory write */ + SKCBF_TX_FLAG_COMPL, /* wr completion flag */ SKCBF_RX_COALESCED, /* received whole pdu */ SKCBF_RX_HDR, /* received pdu header */ SKCBF_RX_DATA, /* received pdu payload */ @@ -527,6 +451,9 @@ struct cxgbi_ports_map { #define CXGBI_FLAG_DEV_T4 0x2 #define CXGBI_FLAG_ADAPTER_RESET 0x4 #define CXGBI_FLAG_IPV4_SET 0x10 +#define CXGBI_FLAG_USE_PPOD_OFLDQ 0x40 +#define CXGBI_FLAG_DDP_OFF 0x100 + struct cxgbi_device { struct list_head list_head; struct list_head rcu_node; @@ -548,15 +475,14 @@ struct cxgbi_device { unsigned int tx_max_size; unsigned int rx_max_size; struct cxgbi_ports_map pmap; - struct cxgbi_tag_format tag_format; - struct cxgbi_ddp_info *ddp; void (*dev_ddp_cleanup)(struct cxgbi_device *); - int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *, - unsigned int, unsigned int, - struct cxgbi_gather_list *); - void (*csk_ddp_clear)(struct cxgbi_hba *, - unsigned int, unsigned int, unsigned int); + struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *); + int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *, + struct cxgbi_task_tag_info *); + void (*csk_ddp_clear_map)(struct cxgbi_device *cdev, + struct cxgbi_ppm *, + struct cxgbi_task_tag_info *); int (*csk_ddp_setup_digest)(struct cxgbi_sock *, unsigned int, int, int, int); int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, @@ -580,6 +506,8 @@ struct cxgbi_conn { struct iscsi_conn *iconn; struct cxgbi_hba *chba; u32 task_idx_bits; + unsigned int ddp_full; + unsigned int ddp_tag_full; }; struct cxgbi_endpoint { @@ -593,85 +521,15 @@ struct cxgbi_task_data { unsigned short nr_frags; struct page_frag 
frags[MAX_PDU_FRAGS]; struct sk_buff *skb; + unsigned int dlen; unsigned int offset; unsigned int count; unsigned int sgoffset; + struct cxgbi_task_tag_info ttinfo; }; #define iscsi_task_cxgbi_data(task) \ ((task)->dd_data + sizeof(struct iscsi_tcp_task)) -static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag) -{ - return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))); -} - -static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat, - u32 sw_tag) -{ - sw_tag >>= (32 - tformat->rsvd_bits); - return !sw_tag; -} - -static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat, - u32 sw_tag) -{ - unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; - u32 mask = (1 << shift) - 1; - - if (sw_tag && (sw_tag & ~mask)) { - u32 v1 = sw_tag & ((1 << shift) - 1); - u32 v2 = (sw_tag >> (shift - 1)) << shift; - - return v2 | v1 | 1 << shift; - } - - return sw_tag | 1 << shift; -} - -static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat, - u32 sw_tag) -{ - u32 mask = (1 << tformat->rsvd_shift) - 1; - - if (sw_tag && (sw_tag & ~mask)) { - u32 v1 = sw_tag & mask; - u32 v2 = sw_tag >> tformat->rsvd_shift; - - v2 <<= tformat->rsvd_bits + tformat->rsvd_shift; - - return v2 | v1; - } - - return sw_tag; -} - -static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat, - u32 tag) -{ - if (cxgbi_is_ddp_tag(tformat, tag)) - return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask; - - return 0; -} - -static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat, - u32 tag) -{ - unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; - u32 v1, v2; - - if (cxgbi_is_ddp_tag(tformat, tag)) { - v1 = tag & ((1 << tformat->rsvd_shift) - 1); - v2 = (tag >> (shift + 1)) << tformat->rsvd_shift; - } else { - u32 mask = (1 << shift) - 1; - tag &= ~(1 << shift); - v1 = tag & mask; - v2 = (tag >> 1) & ~mask; - } - return v1 | v2; -} - static inline void *cxgbi_alloc_big_mem(unsigned int size, gfp_t gfp) { @@ -749,7 +607,11 @@ int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int, unsigned int, unsigned int); int cxgbi_ddp_cleanup(struct cxgbi_device *); void cxgbi_ddp_page_size_factor(int *); -void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *); -void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *, - struct cxgbi_gather_list *, unsigned int); +void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *, + struct cxgbi_task_tag_info *, + struct scatterlist **sg_pp, unsigned int *sg_off); +void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *, + struct cxgbi_tag_format *, unsigned int ppmax, + unsigned int llimit, unsigned int start, + unsigned int rsvd_factor); #endif /*__LIBCXGBI_H__*/ diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 8fb9643fe..661bb94e2 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -764,6 +764,75 @@ static void term_afu(struct cxlflash_cfg *cfg) pr_debug("%s: returning\n", __func__); } +/** + * notify_shutdown() - notifies device of pending shutdown + * @cfg: Internal structure associated with the host. + * @wait: Whether to wait for shutdown processing to complete. + * + * This function will notify the AFU that the adapter is being shutdown + * and will wait for shutdown processing to complete if wait is true. + * This notification should flush pending I/Os to the device and halt + * further I/Os until the next AFU reset is issued and device restarted. 
+ */ +static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + struct sisl_global_map __iomem *global; + struct dev_dependent_vals *ddv; + u64 reg, status; + int i, retry_cnt = 0; + + ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data; + if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN)) + return; + + if (!afu || !afu->afu_map) { + dev_dbg(dev, "%s: The problem state area is not mapped\n", + __func__); + return; + } + + global = &afu->afu_map->global; + + /* Notify AFU */ + for (i = 0; i < NUM_FC_PORTS; i++) { + reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]); + reg |= SISL_FC_SHUTDOWN_NORMAL; + writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]); + } + + if (!wait) + return; + + /* Wait up to 1.5 seconds for shutdown processing to complete */ + for (i = 0; i < NUM_FC_PORTS; i++) { + retry_cnt = 0; + while (true) { + status = readq_be(&global->fc_regs[i][FC_STATUS / 8]); + if (status & SISL_STATUS_SHUTDOWN_COMPLETE) + break; + if (++retry_cnt >= MC_RETRY_CNT) { + dev_dbg(dev, "%s: port %d shutdown processing " + "not yet completed\n", __func__, i); + break; + } + msleep(100 * retry_cnt); + } + } +} + +/** + * cxlflash_shutdown() - shutdown handler + * @pdev: PCI device associated with the host. + */ +static void cxlflash_shutdown(struct pci_dev *pdev) +{ + struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); + + notify_shutdown(cfg, false); +} + /** * cxlflash_remove() - PCI entry point to tear down host * @pdev: PCI device associated with the host. @@ -785,6 +854,9 @@ static void cxlflash_remove(struct pci_dev *pdev) cfg->tmf_slock); spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); + /* Notify AFU and wait for shutdown processing to complete */ + notify_shutdown(cfg, true); + cfg->state = STATE_FAILTERM; cxlflash_stop_term_user_contexts(cfg); @@ -1915,6 +1987,19 @@ static int afu_reset(struct cxlflash_cfg *cfg) return rc; } +/** + * drain_ioctls() - wait until all currently executing ioctls have completed + * @cfg: Internal structure associated with the host. + * + * Obtain write access to read/write semaphore that wraps ioctl + * handling to 'drain' ioctls currently executing. + */ +static void drain_ioctls(struct cxlflash_cfg *cfg) +{ + down_write(&cfg->ioctl_rwsem); + up_write(&cfg->ioctl_rwsem); +} + /** * cxlflash_eh_device_reset_handler() - reset a single LUN * @scp: SCSI command to send. @@ -1986,6 +2071,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) switch (cfg->state) { case STATE_NORMAL: cfg->state = STATE_RESET; + drain_ioctls(cfg); cxlflash_mark_contexts_error(cfg); rcr = afu_reset(cfg); if (rcr) { @@ -2319,8 +2405,10 @@ static struct scsi_host_template driver_template = { /* * Device dependent values */ -static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS }; -static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS }; +static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, + 0ULL }; +static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, + CXLFLASH_NOTIFY_SHUTDOWN }; /* * PCI device binding table @@ -2503,19 +2591,6 @@ out_remove: goto out; } -/** - * drain_ioctls() - wait until all currently executing ioctls have completed - * @cfg: Internal structure associated with the host. - * - * Obtain write access to read/write semaphore that wraps ioctl - * handling to 'drain' ioctls currently executing. 
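drain_ioctls() above is the standard rwsem drain idiom: every ioctl path is assumed to take ioctl_rwsem for read, so briefly acquiring it for write cannot return until all in-flight ioctls have finished. A sketch of the reader side this relies on (example_ioctl is hypothetical):

static int example_ioctl(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	down_read(&cfg->ioctl_rwsem);	/* every in-flight ioctl holds this */
	/* ... the real ioctl work goes here ... */
	up_read(&cfg->ioctl_rwsem);
	return rc;
}

Because down_write() queues behind existing readers and blocks new ones, the empty write/release pair acts as a barrier without serializing normal operation afterwards.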
- */ -static void drain_ioctls(struct cxlflash_cfg *cfg) -{ - down_write(&cfg->ioctl_rwsem); - up_write(&cfg->ioctl_rwsem); -} - /** * cxlflash_pci_error_detected() - called when a PCI error is detected * @pdev: PCI device struct. @@ -2610,6 +2685,7 @@ static struct pci_driver cxlflash_driver = { .id_table = cxlflash_pci_table, .probe = cxlflash_probe, .remove = cxlflash_remove, + .shutdown = cxlflash_shutdown, .err_handler = &cxlflash_err_handler, }; diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index eb9d8f730..e43545c86 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -88,6 +88,8 @@ enum undo_level { struct dev_dependent_vals { u64 max_sectors; + u64 flags; +#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL }; struct asyc_intr_info { @@ -100,8 +102,4 @@ struct asyc_intr_info { #define SCAN_HOST 0x04 }; -#ifndef CONFIG_CXL_EEH -#define cxl_perst_reloads_same_image(_a, _b) do { } while (0) -#endif - #endif /* _CXLFLASH_MAIN_H */ diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index 0b3366f5e..347fc1671 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -311,6 +311,12 @@ struct sisl_global_regs { #define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK) #define SISL_FC_INTERNAL_SHIFT 32 +#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL +#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL + +#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL +#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL + #define SISL_ASTATUS_UNMASK 0xFFFFULL /* 1 means unmasked */ #define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */ diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 0efe7112f..9bd41a35a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -67,9 +67,6 @@ static DEFINE_MUTEX(fcoe_config_mutex); static struct workqueue_struct *fcoe_wq; -/* fcoe_percpu_clean completion. 
Waiter protected by fcoe_create_mutex */ -static DECLARE_COMPLETION(fcoe_flush_completion); - /* fcoe host list */ /* must only by accessed under the RTNL mutex */ static LIST_HEAD(fcoe_hostlist); @@ -80,7 +77,6 @@ static int fcoe_reset(struct Scsi_Host *); static int fcoe_xmit(struct fc_lport *, struct fc_frame *); static int fcoe_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); -static int fcoe_percpu_receive_thread(void *); static void fcoe_percpu_clean(struct fc_lport *); static int fcoe_link_ok(struct fc_lport *); @@ -96,6 +92,8 @@ static struct fcoe_interface static int fcoe_fip_recv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); +static int fcoe_fip_vlan_recv(struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *); static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *); static void fcoe_update_src_mac(struct fc_lport *, u8 *); @@ -107,12 +105,11 @@ static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *, static int fcoe_ddp_done(struct fc_lport *, u16); static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *, unsigned int); -static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); static int fcoe_dcb_app_notification(struct notifier_block *notifier, ulong event, void *ptr); static bool fcoe_match(struct net_device *netdev); -static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode); +static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode); static int fcoe_destroy(struct net_device *netdev); static int fcoe_enable(struct net_device *netdev); static int fcoe_disable(struct net_device *netdev); @@ -120,7 +117,7 @@ static int fcoe_disable(struct net_device *netdev); /* fcoe_syfs control interface handlers */ static int fcoe_ctlr_alloc(struct net_device *netdev); static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev); - +static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev); static struct fc_seq *fcoe_elsct_send(struct fc_lport *, u32 did, struct fc_frame *, @@ -136,11 +133,6 @@ static struct notifier_block fcoe_notifier = { .notifier_call = fcoe_device_notification, }; -/* notification function for CPU hotplug events */ -static struct notifier_block fcoe_cpu_notifier = { - .notifier_call = fcoe_cpu_callback, -}; - /* notification function for DCB events */ static struct notifier_block dcb_notifier = { .notifier_call = fcoe_dcb_app_notification, @@ -156,8 +148,9 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *); static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *); + static struct fcoe_sysfs_function_template fcoe_sysfs_templ = { - .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode, + .set_fcoe_ctlr_mode = fcoe_ctlr_mode, .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled, .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, @@ -372,6 +365,12 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, fcoe->fip_packet_type.dev = netdev; dev_add_pack(&fcoe->fip_packet_type); + if (netdev != real_dev) { + fcoe->fip_vlan_packet_type.func = fcoe_fip_vlan_recv; + fcoe->fip_vlan_packet_type.type = htons(ETH_P_FIP); + fcoe->fip_vlan_packet_type.dev = real_dev; + dev_add_pack(&fcoe->fip_vlan_packet_type); + } return 0; } @@ -459,6 +458,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe) */ __dev_remove_pack(&fcoe->fcoe_packet_type); 
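	/*
	 * Teardown mirrors fcoe_interface_setup() above: the extra ETH_P_FIP
	 * handler was registered on the underlying real device only when the
	 * FCoE interface sits on a VLAN device, roughly (fields as added by
	 * this patch):
	 *
	 *	fcoe->fip_vlan_packet_type.func = fcoe_fip_vlan_recv;
	 *	fcoe->fip_vlan_packet_type.type = htons(ETH_P_FIP);
	 *	fcoe->fip_vlan_packet_type.dev  = real_dev;
	 *	dev_add_pack(&fcoe->fip_vlan_packet_type);
	 *
	 * so it is removed here under the same netdev != realdev test, and
	 * synchronize_net() then lets in-flight receivers drain.
	 */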
__dev_remove_pack(&fcoe->fip_packet_type); + if (netdev != fcoe->realdev) + __dev_remove_pack(&fcoe->fip_vlan_packet_type); synchronize_net(); /* Delete secondary MAC addresses */ @@ -528,6 +529,29 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev, return 0; } +/** + * fcoe_fip_vlan_recv() - Handler for received FIP VLAN discovery frames + * @skb: The receive skb + * @netdev: The associated net device + * @ptype: The packet_type structure which was used to register this handler + * @orig_dev: The original net_device the the skb was received on. + * (in case dev is a bond) + * + * Returns: 0 for success + */ +static int fcoe_fip_vlan_recv(struct sk_buff *skb, struct net_device *netdev, + struct packet_type *ptype, + struct net_device *orig_dev) +{ + struct fcoe_interface *fcoe; + struct fcoe_ctlr *ctlr; + + fcoe = container_of(ptype, struct fcoe_interface, fip_vlan_packet_type); + ctlr = fcoe_to_ctlr(fcoe); + fcoe_ctlr_recv(ctlr, skb); + return 0; +} + /** * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame * @port: The FCoE port @@ -548,7 +572,21 @@ static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb) */ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { - skb->dev = fcoe_from_ctlr(fip)->netdev; + struct fcoe_interface *fcoe = fcoe_from_ctlr(fip); + struct fip_frame { + struct ethhdr eth; + struct fip_header fip; + } __packed *frame; + + /* + * Use default VLAN for FIP VLAN discovery protocol + */ + frame = (struct fip_frame *)skb->data; + if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) && + fcoe->realdev != fcoe->netdev) + skb->dev = fcoe->realdev; + else + skb->dev = fcoe->netdev; fcoe_port_send(lport_priv(fip->lp), skb); } @@ -682,6 +720,12 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) fcoe = port->priv; ctlr = fcoe_to_ctlr(fcoe); + /* Figure out the VLAN ID, if any */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + lport->vlan = vlan_dev_vlan_id(netdev); + else + lport->vlan = 0; + /* * Determine max frame size based on underlying device and optional * user-configured limit. If the MFS is too low, fcoe_link_ok() @@ -780,9 +824,6 @@ static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev) fcoe = port->priv; realdev = fcoe->realdev; - if (!realdev) - return; - /* No FDMI state m/c for NPIV ports */ if (lport->vport) return; @@ -1245,152 +1286,21 @@ static int __exit fcoe_if_exit(void) return 0; } -/** - * fcoe_percpu_thread_create() - Create a receive thread for an online CPU - * @cpu: The CPU index of the CPU to create a receive thread for - */ -static void fcoe_percpu_thread_create(unsigned int cpu) +static void fcoe_thread_cleanup_local(unsigned int cpu) { - struct fcoe_percpu_s *p; - struct task_struct *thread; - - p = &per_cpu(fcoe_percpu, cpu); - - thread = kthread_create_on_node(fcoe_percpu_receive_thread, - (void *)p, cpu_to_node(cpu), - "fcoethread/%d", cpu); - - if (likely(!IS_ERR(thread))) { - kthread_bind(thread, cpu); - wake_up_process(thread); - - spin_lock_bh(&p->fcoe_rx_list.lock); - p->thread = thread; - spin_unlock_bh(&p->fcoe_rx_list.lock); - } -} - -/** - * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU - * @cpu: The CPU index of the CPU whose receive thread is to be destroyed - * - * Destroys a per-CPU Rx thread. Any pending skbs are moved to the - * current CPU's Rx thread. If the thread being destroyed is bound to - * the CPU processing this context the skbs will be freed. 
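A note on fcoe_fip_send() above: FIP VLAN discovery runs before any VLAN is known, so only FIP_OP_VLAN frames are steered to the real device while everything else keeps using the (possibly VLAN) netdev. The dispatch reduces to the sketch below; the patch spells the constant with ntohs(), which performs the same byte swap as htons() and is therefore equivalent:

	struct fip_frame {
		struct ethhdr eth;
		struct fip_header fip;
	} __packed *frame = (struct fip_frame *)skb->data;

	/* untagged VLAN discovery goes out the real device */
	if (frame->fip.fip_op == htons(FIP_OP_VLAN) &&
	    fcoe->realdev != fcoe->netdev)
		skb->dev = fcoe->realdev;
	else
		skb->dev = fcoe->netdev;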
- */ -static void fcoe_percpu_thread_destroy(unsigned int cpu) -{ - struct fcoe_percpu_s *p; - struct task_struct *thread; struct page *crc_eof; - struct sk_buff *skb; -#ifdef CONFIG_SMP - struct fcoe_percpu_s *p0; - unsigned targ_cpu = get_cpu(); -#endif /* CONFIG_SMP */ - - FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); + struct fcoe_percpu_s *p; - /* Prevent any new skbs from being queued for this CPU. */ - p = &per_cpu(fcoe_percpu, cpu); + p = per_cpu_ptr(&fcoe_percpu, cpu); spin_lock_bh(&p->fcoe_rx_list.lock); - thread = p->thread; - p->thread = NULL; crc_eof = p->crc_eof_page; p->crc_eof_page = NULL; p->crc_eof_offset = 0; spin_unlock_bh(&p->fcoe_rx_list.lock); -#ifdef CONFIG_SMP - /* - * Don't bother moving the skb's if this context is running - * on the same CPU that is having its thread destroyed. This - * can easily happen when the module is removed. - */ - if (cpu != targ_cpu) { - p0 = &per_cpu(fcoe_percpu, targ_cpu); - spin_lock_bh(&p0->fcoe_rx_list.lock); - if (p0->thread) { - FCOE_DBG("Moving frames from CPU %d to CPU %d\n", - cpu, targ_cpu); - - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - __skb_queue_tail(&p0->fcoe_rx_list, skb); - spin_unlock_bh(&p0->fcoe_rx_list.lock); - } else { - /* - * The targeted CPU is not initialized and cannot accept - * new skbs. Unlock the targeted CPU and drop the skbs - * on the CPU that is going offline. - */ - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p0->fcoe_rx_list.lock); - } - } else { - /* - * This scenario occurs when the module is being removed - * and all threads are being destroyed. skbs will continue - * to be shifted from the CPU thread that is being removed - * to the CPU thread associated with the CPU that is processing - * the module removal. Once there is only one CPU Rx thread it - * will reach this case and we will drop all skbs and later - * stop the thread. - */ - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); - } - put_cpu(); -#else - /* - * This a non-SMP scenario where the singular Rx thread is - * being removed. Free all skbs and stop the thread. - */ - spin_lock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL) - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); -#endif - - if (thread) - kthread_stop(thread); - if (crc_eof) put_page(crc_eof); -} - -/** - * fcoe_cpu_callback() - Handler for CPU hotplug events - * @nfb: The callback data block - * @action: The event triggering the callback - * @hcpu: The index of the CPU that the event is for - * - * This creates or destroys per-CPU data for fcoe - * - * Returns NOTIFY_OK always. - */ -static int fcoe_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned cpu = (unsigned long)hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - FCOE_DBG("CPU %x online: Create Rx thread\n", cpu); - fcoe_percpu_thread_create(cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu); - fcoe_percpu_thread_destroy(cpu); - break; - default: - break; - } - return NOTIFY_OK; + flush_work(&p->work); } /** @@ -1509,26 +1419,6 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, fps = &per_cpu(fcoe_percpu, cpu); spin_lock(&fps->fcoe_rx_list.lock); - if (unlikely(!fps->thread)) { - /* - * The targeted CPU is not ready, let's target - * the first CPU now. 
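Taken together, the fcoe.c hunks in this region replace the per-CPU receive kthreads and their CPU-hotplug notifier with a per-CPU work item: the rx path queues the skb and schedules work on the chosen CPU, and the worker splices the whole list out under the lock before processing it unlocked. A condensed sketch of the resulting pattern, assuming the fcoe_percpu_s fields this patch uses and that INIT_WORK()/skb_queue_head_init() ran at module init as in fcoe_init():

static DEFINE_PER_CPU(struct fcoe_percpu_s, example_percpu);

static void example_rx(struct sk_buff *skb, unsigned int cpu)
{
	struct fcoe_percpu_s *fps = &per_cpu(example_percpu, cpu);

	spin_lock(&fps->fcoe_rx_list.lock);
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	schedule_work_on(cpu, &fps->work);	/* replaces wake_up_process() */
	spin_unlock(&fps->fcoe_rx_list.lock);
}

static void example_work(struct work_struct *work)
{
	struct fcoe_percpu_s *p = container_of(work, struct fcoe_percpu_s, work);
	struct sk_buff_head tmp;
	struct sk_buff *skb;

	skb_queue_head_init(&tmp);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	skb_queue_splice_init(&p->fcoe_rx_list, &tmp);	/* drain under lock */
	spin_unlock_bh(&p->fcoe_rx_list.lock);

	while ((skb = __skb_dequeue(&tmp)) != NULL)
		fcoe_recv_frame(skb);		/* process without the lock */
}

Cleanup then only needs flush_work() on every possible CPU, which also drains skbs parked on a CPU that has since gone offline.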
For non-SMP systems this - * will check the same CPU twice. - */ - FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread " - "ready for incoming skb- using first online " - "CPU.\n"); - - spin_unlock(&fps->fcoe_rx_list.lock); - cpu = cpumask_first(cpu_online_mask); - fps = &per_cpu(fcoe_percpu, cpu); - spin_lock(&fps->fcoe_rx_list.lock); - if (!fps->thread) { - spin_unlock(&fps->fcoe_rx_list.lock); - goto err; - } - } - /* * We now have a valid CPU that we're targeting for * this skb. We also have this receive thread locked, @@ -1543,8 +1433,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, * in softirq context. */ __skb_queue_tail(&fps->fcoe_rx_list, skb); - if (fps->thread->state == TASK_INTERRUPTIBLE) - wake_up_process(fps->thread); + schedule_work_on(cpu, &fps->work); spin_unlock(&fps->fcoe_rx_list.lock); return NET_RX_SUCCESS; @@ -1712,15 +1601,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) return 0; } -/** - * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion - * @skb: The completed skb (argument required by destructor) - */ -static void fcoe_percpu_flush_done(struct sk_buff *skb) -{ - complete(&fcoe_flush_completion); -} - /** * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC * @lport: The local port the frame was received on @@ -1792,8 +1672,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) fr = fcoe_dev_from_skb(skb); lport = fr->fr_dev; if (unlikely(!lport)) { - if (skb->destructor != fcoe_percpu_flush_done) - FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); kfree_skb(skb); return; } @@ -1857,40 +1736,28 @@ drop: } /** - * fcoe_percpu_receive_thread() - The per-CPU packet receive thread - * @arg: The per-CPU context + * fcoe_receive_work() - The per-CPU worker + * @work: The work struct * - * Return: 0 for success */ -static int fcoe_percpu_receive_thread(void *arg) +static void fcoe_receive_work(struct work_struct *work) { - struct fcoe_percpu_s *p = arg; + struct fcoe_percpu_s *p; struct sk_buff *skb; struct sk_buff_head tmp; + p = container_of(work, struct fcoe_percpu_s, work); skb_queue_head_init(&tmp); - set_user_nice(current, MIN_NICE); - - while (!kthread_should_stop()) { - - spin_lock_bh(&p->fcoe_rx_list.lock); - skb_queue_splice_init(&p->fcoe_rx_list, &tmp); - - if (!skb_queue_len(&tmp)) { - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_bh(&p->fcoe_rx_list.lock); - schedule(); - continue; - } - - spin_unlock_bh(&p->fcoe_rx_list.lock); + spin_lock_bh(&p->fcoe_rx_list.lock); + skb_queue_splice_init(&p->fcoe_rx_list, &tmp); + spin_unlock_bh(&p->fcoe_rx_list.lock); - while ((skb = __skb_dequeue(&tmp)) != NULL) - fcoe_recv_frame(skb); + if (!skb_queue_len(&tmp)) + return; - } - return 0; + while ((skb = __skb_dequeue(&tmp))) + fcoe_recv_frame(skb); } /** @@ -2162,6 +2029,32 @@ static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev) }; } +/** + * fcoe_ctlr_mode() - Switch FIP mode + * @cdev: The FCoE Controller that is being modified + * + * When the FIP mode has been changed we need to update + * the multicast addresses to ensure we get the correct + * frames. 
+ */ +static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr); + + if (ctlr_dev->mode == FIP_CONN_TYPE_VN2VN && + ctlr->mode != FIP_MODE_VN2VN) { + dev_mc_del(fcoe->netdev, FIP_ALL_ENODE_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_VN2VN_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_P2P_MACS); + } else if (ctlr->mode != FIP_MODE_FABRIC) { + dev_mc_del(fcoe->netdev, FIP_ALL_VN2VN_MACS); + dev_mc_del(fcoe->netdev, FIP_ALL_P2P_MACS); + dev_mc_add(fcoe->netdev, FIP_ALL_ENODE_MACS); + } + fcoe_ctlr_set_fip_mode(ctlr_dev); +} + /** * fcoe_destroy() - Destroy a FCoE interface * @netdev : The net_device object the Ethernet interface to create on @@ -2317,7 +2210,7 @@ enum fcoe_create_link_state { * consolidation of code can be done when that interface is * removed. */ -static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, +static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode, enum fcoe_create_link_state link_state) { int rc = 0; @@ -2406,7 +2299,7 @@ out: * * Returns: 0 for success */ -static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) +static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode) { return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP); } @@ -2450,36 +2343,19 @@ static int fcoe_link_ok(struct fc_lport *lport) * * Must be called with fcoe_create_mutex held to single-thread completion. * - * This flushes the pending skbs by adding a new skb to each queue and - * waiting until they are all freed. This assures us that not only are - * there no packets that will be handled by the lport, but also that any - * threads already handling packet have returned. + * This flushes the pending skbs by flush the work item for each CPU. The work + * item on each possible CPU is flushed because we may have used the per-CPU + * struct of an offline CPU. 
*/ static void fcoe_percpu_clean(struct fc_lport *lport) { struct fcoe_percpu_s *pp; - struct sk_buff *skb; unsigned int cpu; for_each_possible_cpu(cpu) { pp = &per_cpu(fcoe_percpu, cpu); - if (!pp->thread || !cpu_online(cpu)) - continue; - - skb = dev_alloc_skb(0); - if (!skb) - continue; - - skb->destructor = fcoe_percpu_flush_done; - - spin_lock_bh(&pp->fcoe_rx_list.lock); - __skb_queue_tail(&pp->fcoe_rx_list, skb); - if (pp->fcoe_rx_list.qlen == 1) - wake_up_process(pp->thread); - spin_unlock_bh(&pp->fcoe_rx_list.lock); - - wait_for_completion(&fcoe_flush_completion); + flush_work(&pp->work); } } @@ -2619,28 +2495,17 @@ static int __init fcoe_init(void) if (rc) { printk(KERN_ERR "failed to register an fcoe transport, check " "if libfcoe is loaded\n"); - return rc; + goto out_destroy; } mutex_lock(&fcoe_config_mutex); for_each_possible_cpu(cpu) { - p = &per_cpu(fcoe_percpu, cpu); + p = per_cpu_ptr(&fcoe_percpu, cpu); + INIT_WORK(&p->work, fcoe_receive_work); skb_queue_head_init(&p->fcoe_rx_list); } - cpu_notifier_register_begin(); - - for_each_online_cpu(cpu) - fcoe_percpu_thread_create(cpu); - - /* Initialize per CPU interrupt thread */ - rc = __register_hotcpu_notifier(&fcoe_cpu_notifier); - if (rc) - goto out_free; - - cpu_notifier_register_done(); - /* Setup link change notification */ fcoe_dev_setup(); @@ -2652,13 +2517,8 @@ static int __init fcoe_init(void) return 0; out_free: - for_each_online_cpu(cpu) { - fcoe_percpu_thread_destroy(cpu); - } - - cpu_notifier_register_done(); - mutex_unlock(&fcoe_config_mutex); +out_destroy: destroy_workqueue(fcoe_wq); return rc; } @@ -2690,14 +2550,8 @@ static void __exit fcoe_exit(void) } rtnl_unlock(); - cpu_notifier_register_begin(); - - for_each_online_cpu(cpu) - fcoe_percpu_thread_destroy(cpu); - - __unregister_hotcpu_notifier(&fcoe_cpu_notifier); - - cpu_notifier_register_done(); + for_each_possible_cpu(cpu) + fcoe_thread_cleanup_local(cpu); mutex_unlock(&fcoe_config_mutex); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 2b53672bf..6aa4820f6 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -80,6 +80,7 @@ struct fcoe_interface { struct net_device *realdev; struct packet_type fcoe_packet_type; struct packet_type fip_packet_type; + struct packet_type fip_vlan_packet_type; struct fc_exch_mgr *oem; u8 removed; u8 priority; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 3e83d485f..dcf36537a 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -59,6 +59,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *); static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *); +static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *, struct sk_buff *); + static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; @@ -149,6 +151,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) { fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); fip->mode = mode; + fip->fip_resp = false; INIT_LIST_HEAD(&fip->fcfs); mutex_init(&fip->ctlr_mutex); spin_lock_init(&fip->ctlr_lock); @@ -991,7 +994,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) 
return -EINVAL; break; } @@ -1232,7 +1235,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) goto drop; if (desc_cnt <= 2) { LIBFCOE_FIP_DBG(fip, "FIP descriptors " @@ -1410,7 +1413,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, break; default: /* standard says ignore unknown descriptors >= 128 */ - if (desc->fip_dtype < FIP_DT_VENDOR_BASE) + if (desc->fip_dtype < FIP_DT_NON_CRITICAL) goto err; break; } @@ -1513,6 +1516,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) struct fip_header *fiph; struct ethhdr *eh; enum fip_state state; + bool fip_vlan_resp = false; u16 op; u8 sub; @@ -1546,11 +1550,17 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) state = FIP_ST_ENABLED; LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); } + fip_vlan_resp = fip->fip_resp; mutex_unlock(&fip->ctlr_mutex); if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) return fcoe_ctlr_vn_recv(fip, skb); + if (fip_vlan_resp && op == FIP_OP_VLAN) { + LIBFCOE_FIP_DBG(fip, "fip vlan discovery\n"); + return fcoe_ctlr_vlan_recv(fip, skb); + } + if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP && state != FIP_ST_VNMP_CLAIM) goto drop; @@ -1989,7 +1999,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, const u8 *dest, size_t min_len) { struct sk_buff *skb; - struct fip_frame { + struct fip_vn2vn_probe_frame { struct ethhdr eth; struct fip_header fip; struct fip_mac_desc mac; @@ -2016,7 +2026,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, if (!skb) return; - frame = (struct fip_frame *)skb->data; + frame = (struct fip_vn2vn_probe_frame *)skb->data; memset(frame, 0, len); memcpy(frame->eth.h_dest, dest, ETH_ALEN); @@ -2338,7 +2348,7 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP probe\n", dtype); /* standard says ignore unknown descriptors >= 128 */ - if (dtype < FIP_DT_VENDOR_BASE) + if (dtype < FIP_DT_NON_CRITICAL) return -EINVAL; break; } @@ -2496,14 +2506,13 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) struct fcoe_rport *frport; int ret = -1; - rcu_read_lock(); rdata = lport->tt.rport_lookup(lport, port_id); if (rdata) { frport = fcoe_ctlr_rport(rdata); memcpy(mac, frport->enode_mac, ETH_ALEN); ret = 0; + kref_put(&rdata->kref, lport->tt.rport_destroy); } - rcu_read_unlock(); return ret; } @@ -2585,11 +2594,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); return; } - mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, new->ids.port_id); - if (rdata) - kref_get(&rdata->kref); - mutex_unlock(&lport->disc.disc_mutex); if (rdata) { if (rdata->ids.node_name == new->ids.node_name && rdata->ids.port_name == new->ids.port_name) { @@ -2708,6 +2713,220 @@ drop: return rc; } +/** + * fcoe_ctlr_vlan_parse - parse vlan discovery request or response + * @fip: The FCoE controller + * @skb: incoming packet + * @rdata: buffer for resulting parsed VLAN entry plus fcoe_rport + * + * Returns non-zero error number on error. + * Does not consume the packet. 
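The descriptor walk in fcoe_ctlr_vlan_parse() below uses the same single-bitmask validation idiom as the other FIP parsers in this file: the mask starts with one bit per required descriptor and each accepted descriptor clears its bit, so a single test rejects both unexpected and duplicate types. In isolation (helper name illustrative):

static int example_check_desc(u32 *desc_mask, u8 dtype)
{
	if (dtype < 32) {
		if (!(*desc_mask & BIT(dtype)))
			return -EINVAL;		/* unexpected or repeated */
		*desc_mask &= ~BIT(dtype);	/* each type accepted once */
	}
	return 0;	/* types >= 32 are left to the switch statement */
}

For a VLAN request the mask starts as BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME), matching the two descriptors such a request must carry.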
+ */ +static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, + struct sk_buff *skb, + struct fc_rport_priv *rdata) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_mac_desc *macd = NULL; + struct fip_wwn_desc *wwn = NULL; + struct fcoe_rport *frport; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 dtype; + u8 sub; + + memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); + frport = fcoe_ctlr_rport(rdata); + + fiph = (struct fip_header *)skb->data; + frport->flags = ntohs(fiph->fip_flags); + + sub = fiph->fip_subcode; + switch (sub) { + case FIP_SC_VL_REQ: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); + return -EINVAL; + } + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + + dtype = desc->fip_dtype; + if (dtype < 32) { + if (!(desc_mask & BIT(dtype))) { + LIBFCOE_FIP_DBG(fip, + "unexpected or duplicated desc " + "desc type %u in " + "FIP VN2VN subtype %u\n", + dtype, sub); + return -EINVAL; + } + desc_mask &= ~BIT(dtype); + } + + switch (dtype) { + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + macd = (struct fip_mac_desc *)desc; + if (!is_valid_ether_addr(macd->fd_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP VN2VN\n", + macd->fd_mac); + return -EINVAL; + } + memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP probe\n", dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (dtype < FIP_DT_NON_CRITICAL) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + dtype, dlen); + return -EINVAL; +} + +/** + * fcoe_ctlr_vlan_send() - Send a FIP VLAN Notification + * @fip: The FCoE controller + * @sub: sub-opcode for vlan notification or vn2vn vlan notification + * @dest: The destination Ethernet MAC address + * @min_len: minimum size of the Ethernet payload to be sent + */ +static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip, + enum fip_vlan_subcode sub, + const u8 *dest) +{ + struct sk_buff *skb; + struct fip_vlan_notify_frame { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + struct fip_vlan_desc vlan; + } __packed * frame; + size_t len; + size_t dlen; + + len = sizeof(*frame); + dlen = sizeof(frame->mac) + sizeof(frame->vlan); + len = max(len, sizeof(struct ethhdr)); + + skb = dev_alloc_skb(len); + if (!skb) + return; + + LIBFCOE_FIP_DBG(fip, "fip %s vlan notification, vlan %d\n", + fip->mode == FIP_MODE_VN2VN ? 
"vn2vn" : "fcf", + fip->lp->vlan); + + frame = (struct fip_vlan_notify_frame *)skb->data; + memset(frame, 0, len); + memcpy(frame->eth.h_dest, dest, ETH_ALEN); + + memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + frame->eth.h_proto = htons(ETH_P_FIP); + + frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + frame->fip.fip_op = htons(FIP_OP_VLAN); + frame->fip.fip_subcode = sub; + frame->fip.fip_dl_len = htons(dlen / FIP_BPW); + + frame->mac.fd_desc.fip_dtype = FIP_DT_MAC; + frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW; + memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + frame->vlan.fd_desc.fip_dtype = FIP_DT_VLAN; + frame->vlan.fd_desc.fip_dlen = sizeof(frame->vlan) / FIP_BPW; + put_unaligned_be16(fip->lp->vlan, &frame->vlan.fd_vlan); + + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb->priority = fip->priority; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification. + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, + struct fc_rport_priv *rdata) +{ + struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); + enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; + + if (fip->mode == FIP_MODE_VN2VN) + sub = FIP_SC_VL_VN2VN_NOTE; + + fcoe_ctlr_vlan_send(fip, sub, frport->enode_mac); +} + +/** + * fcoe_ctlr_vlan_recv - vlan request receive handler for VN2VN mode. + * @lport: The local port + * @fp: The received frame + * + */ +static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + enum fip_vlan_subcode sub; + struct { + struct fc_rport_priv rdata; + struct fcoe_rport frport; + } buf; + int rc; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata); + if (rc) { + LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); + goto drop; + } + mutex_lock(&fip->ctlr_mutex); + if (sub == FIP_SC_VL_REQ) + fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata); + mutex_unlock(&fip->ctlr_mutex); + +drop: + kfree_skb(skb); + return rc; +} + /** * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. * @lport: The local port @@ -2869,7 +3088,7 @@ unlock: * when nothing is happening. 
*/ static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, - enum fip_state fip_mode) + enum fip_mode fip_mode) { void *priv; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 045c4e11e..0675fd128 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -385,6 +385,44 @@ static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR, show_ctlr_enabled_state, store_ctlr_enabled); +static ssize_t store_ctlr_fip_resp(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); + + mutex_lock(&fip->ctlr_mutex); + if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { + if (buf[0] == '1') { + fip->fip_resp = 1; + mutex_unlock(&fip->ctlr_mutex); + return count; + } + if (buf[0] == '0') { + fip->fip_resp = 0; + mutex_unlock(&fip->ctlr_mutex); + return count; + } + } + mutex_unlock(&fip->ctlr_mutex); + return -EINVAL; +} + +static ssize_t show_ctlr_fip_resp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); + struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); + + return sprintf(buf, "%d\n", fip->fip_resp ? 1 : 0); +} + +static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR, + show_ctlr_fip_resp, + store_ctlr_fip_resp); + static ssize_t store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr, @@ -467,6 +505,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = { }; static struct attribute *fcoe_ctlr_attrs[] = { + &device_attr_fcoe_ctlr_fip_vlan_responder.attr, &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, &device_attr_fcoe_ctlr_enabled.attr, &device_attr_fcoe_ctlr_mode.attr, diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 641c60e8f..7028dd37e 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -133,10 +133,10 @@ int fcoe_link_speed_update(struct fc_lport *lport) case SPEED_10000: lport->link_speed = FC_PORTSPEED_10GBIT; break; - case 20000: + case SPEED_20000: lport->link_speed = FC_PORTSPEED_20GBIT; break; - case 40000: + case SPEED_40000: lport->link_speed = FC_PORTSPEED_40GBIT; break; default: diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 67669a9e7..3b7da66e2 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -359,7 +359,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic) vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); vlan->fip.fip_op = htons(FIP_OP_VLAN); - vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_subcode = FIP_SC_VL_NOTE; vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; @@ -551,7 +551,7 @@ static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) goto drop; /* pass it on to fcoe */ ret = 1; - } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) { + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { /* set the vlan as used */ fnic_fcoe_process_vlan_resp(fnic, skb); ret = 0; @@ -954,8 +954,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq) skb_put(skb, len); pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE); - r = pci_dma_mapping_error(fnic->pdev, pa); - if (r) { + if (pci_dma_mapping_error(fnic->pdev, pa)) { + r = -ENOMEM; printk(KERN_ERR "PCI mapping failed with error %d\n", r); goto 
free_skb; } @@ -1093,8 +1093,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); - ret = pci_dma_mapping_error(fnic->pdev, pa); - if (ret) { + if (pci_dma_mapping_error(fnic->pdev, pa)) { + ret = -ENOMEM; printk(KERN_ERR "DMA map failed with error %d\n", ret); goto free_skb_on_err; } @@ -1308,7 +1308,7 @@ void fnic_handle_fip_timer(struct fnic *fnic) } spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (fnic->ctlr.mode == FIP_ST_NON_FIP) + if (fnic->ctlr.mode == FIP_MODE_NON_FIP) return; spin_lock_irqsave(&fnic->vlans_lock, flags); diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h index 87e74c2ab..7761f33ab 100644 --- a/drivers/scsi/fnic/fnic_fip.h +++ b/drivers/scsi/fnic/fnic_fip.h @@ -26,14 +26,6 @@ #define FINC_MAX_FLOGI_REJECTS 8 -/* - * FIP_DT_VLAN descriptor. - */ -struct fip_vlan_desc { - struct fip_desc fd_desc; - __be16 fd_vlan; -} __attribute__((packed)); - struct vlan { __be16 vid; __be16 type; diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index d7cab724f..4731d3241 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -23,7 +23,7 @@ #include #include -#define DRV_VERSION "v1.4" +#define DRV_VERSION "v1.5" #define HISI_SAS_MAX_PHYS 9 #define HISI_SAS_MAX_QUEUES 32 diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index bd20c5488..f96560431 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -721,30 +721,41 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) return -EIO; } - /* reset and disable clock*/ - regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, - reset_val); - regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, - reset_val); - msleep(1); - regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); - if (reset_val != (val & reset_val)) { - dev_err(dev, "SAS reset fail.\n"); - return -EIO; - } + if (ACPI_HANDLE(dev)) { + acpi_status s; - /* De-reset and enable clock*/ - regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, - reset_val); - regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, - reset_val); - msleep(1); - regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, - &val); - if (val & reset_val) { - dev_err(dev, "SAS de-reset fail.\n"); - return -EIO; - } + s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(dev, "Reset failed\n"); + return -EIO; + } + } else if (hisi_hba->ctrl) { + /* reset and disable clock*/ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg, + reset_val); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4, + reset_val); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val); + if (reset_val != (val & reset_val)) { + dev_err(dev, "SAS reset fail.\n"); + return -EIO; + } + + /* De-reset and enable clock*/ + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4, + reset_val); + regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg, + reset_val); + msleep(1); + regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, + &val); + if (val & reset_val) { + dev_err(dev, "SAS de-reset fail.\n"); + return -EIO; + } + } else + dev_warn(dev, "no reset method\n"); return 0; } @@ -752,13 +763,12 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) static void init_reg_v2_hw(struct hisi_hba *hisi_hba) { struct device *dev = &hisi_hba->pdev->dev; - 
struct device_node *np = dev->of_node; int i; /* Global registers init */ /* Deal with am-max-transmissions quirk */ - if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) { + if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) { hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020); hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS, 0x2020); @@ -1902,14 +1912,9 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_ha_struct *sas_ha = &hisi_hba->sha; - unsigned long flags; hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); - - spin_lock_irqsave(&hisi_hba->lock, flags); sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_RX_BCST_ACK_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); @@ -2260,12 +2265,20 @@ static const struct of_device_id sas_v2_of_match[] = { }; MODULE_DEVICE_TABLE(of, sas_v2_of_match); +static const struct acpi_device_id sas_v2_acpi_match[] = { + { "HISI0162", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); + static struct platform_driver hisi_sas_v2_driver = { .probe = hisi_sas_v2_probe, .remove = hisi_sas_v2_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v2_of_match, + .acpi_match_table = ACPI_PTR(sas_v2_acpi_match), }, }; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 1547bd93c..ec6381e57 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -486,7 +486,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) else shost->dma_boundary = 0xffffffff; - shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq; + shost->use_blk_mq = scsi_use_blk_mq; device_initialize(&shost->shost_gendev); dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ff8dcd5b0..030d0023e 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4105,6 +4105,70 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h, return rc; } +static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes) +{ + struct bmic_identify_physical_device *id_phys; + bool is_spare = false; + int rc; + + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); + if (!id_phys) + return false; + + rc = hpsa_bmic_id_physical_device(h, + lunaddrbytes, + GET_BMIC_DRIVE_NUMBER(lunaddrbytes), + id_phys, sizeof(*id_phys)); + if (rc == 0) + is_spare = (id_phys->more_flags >> 6) & 0x01; + + kfree(id_phys); + return is_spare; +} + +#define RPL_DEV_FLAG_NON_DISK 0x1 +#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2 +#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4 + +#define BMIC_DEVICE_TYPE_ENCLOSURE 6 + +static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes, + struct ext_report_lun_entry *rle) +{ + u8 device_flags; + u8 device_type; + + if (!MASKED_DEVICE(lunaddrbytes)) + return false; + + device_flags = rle->device_flags; + device_type = rle->device_type; + + if (device_flags & RPL_DEV_FLAG_NON_DISK) { + if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE) + return false; + return true; + } + + if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED)) + return false; + + if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK) + return false; + + /* + * Spares may be spun down, we do not want to + * do an Inquiry to a RAID set spare drive as + * that would have 
them spun up, that is a + * performance hit because I/O to the RAID device + * stops while the spin up occurs which can take + * over 50 seconds. + */ + if (hpsa_is_disk_spare(h, lunaddrbytes)) + return true; + + return false; +} static void hpsa_update_scsi_devices(struct ctlr_info *h) { @@ -4198,6 +4262,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) u8 *lunaddrbytes, is_OBDR = 0; int rc = 0; int phys_dev_index = i - (raid_ctlr_position == 0); + bool skip_device = false; physical_device = i < nphysicals + (raid_ctlr_position == 0); @@ -4205,11 +4270,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, i, nphysicals, nlogicals, physdev_list, logdev_list); - /* skip masked non-disk devices */ - if (MASKED_DEVICE(lunaddrbytes) && physical_device && - (physdev_list->LUN[phys_dev_index].device_type != 0x06) && - (physdev_list->LUN[phys_dev_index].device_flags & 0x01)) - continue; + /* + * Skip over some devices such as a spare. + */ + if (!tmpdevice->external && physical_device) { + skip_device = hpsa_skip_device(h, lunaddrbytes, + &physdev_list->LUN[phys_dev_index]); + if (skip_device) + continue; + } /* Get device type, vendor, model, device id */ rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, @@ -6455,7 +6524,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, - DEFAULT_TIMEOUT); + NO_TIMEOUT); if (iocommand.buf_size > 0) hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); @@ -6588,7 +6657,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); } status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, - DEFAULT_TIMEOUT); + NO_TIMEOUT); if (sg_used) hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index fc523c3e5..ab67ec4b6 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4722,6 +4722,8 @@ static void ibmvfc_rport_add_thread(struct work_struct *work) tgt_dbg(tgt, "Setting rport roles\n"); fc_remote_port_rolechg(rport, tgt->ids.roles); put_device(&rport->dev); + } else { + spin_unlock_irqrestore(vhost->host->host_lock, flags); } kref_put(&tgt->kref, ibmvfc_release_tgt); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 8fae03215..5c70a52ad 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -26,7 +26,7 @@ #include #include -#include "viosrp.h" +#include #define IBMVFC_NAME "ibmvfc" #define IBMVFC_DRIVER_VERSION "1.0.11" diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h index 106736739..e0f6c3aeb 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.h +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h @@ -33,7 +33,7 @@ #include #include #include -#include "viosrp.h" +#include struct scsi_cmnd; struct Scsi_Host; diff --git a/drivers/scsi/ibmvscsi_tgt/Makefile b/drivers/scsi/ibmvscsi_tgt/Makefile new file mode 100644 index 000000000..0c060ce64 --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsis.o + +ibmvscsis-y := libsrp.o ibmvscsi_tgt.o diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c new file mode 100644 index 
000000000..b29fef9d0 --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -0,0 +1,4087 @@ +/******************************************************************************* + * IBM Virtual SCSI Target Driver + * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp. + * Santiago Leon (santil@us.ibm.com) IBM Corp. + * Linda Xie (lxie@us.ibm.com) IBM Corp. + * + * Copyright (C) 2005-2011 FUJITA Tomonori + * Copyright (C) 2010 Nicholas A. Bellinger + * + * Authors: Bryant G. Ly + * Authors: Michael Cyr + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + ****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include "ibmvscsi_tgt.h" + +#define IBMVSCSIS_VERSION "v0.2" + +#define INITIAL_SRP_LIMIT 800 +#define DEFAULT_MAX_SECTORS 256 + +static uint max_vdma_size = MAX_H_COPY_RDMA; + +static char system_id[SYS_ID_NAME_LEN] = ""; +static char partition_name[PARTITION_NAMELEN] = "UNKNOWN"; +static uint partition_number = -1; + +/* Adapter list and lock to control it */ +static DEFINE_SPINLOCK(ibmvscsis_dev_lock); +static LIST_HEAD(ibmvscsis_dev_list); + +static long ibmvscsis_parse_command(struct scsi_info *vscsi, + struct viosrp_crq *crq); + +static void ibmvscsis_adapter_idle(struct scsi_info *vscsi); + +static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, + struct srp_rsp *rsp) +{ + u32 residual_count = se_cmd->residual_count; + + if (!residual_count) + return; + + if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + if (se_cmd->data_direction == DMA_TO_DEVICE) { + /* residual data from an underflow write */ + rsp->flags = SRP_RSP_FLAG_DOUNDER; + rsp->data_out_res_cnt = cpu_to_be32(residual_count); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + /* residual data from an underflow read */ + rsp->flags = SRP_RSP_FLAG_DIUNDER; + rsp->data_in_res_cnt = cpu_to_be32(residual_count); + } + } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + if (se_cmd->data_direction == DMA_TO_DEVICE) { + /* residual data from an overflow write */ + rsp->flags = SRP_RSP_FLAG_DOOVER; + rsp->data_out_res_cnt = cpu_to_be32(residual_count); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + /* residual data from an overflow read */ + rsp->flags = SRP_RSP_FLAG_DIOVER; + rsp->data_in_res_cnt = cpu_to_be32(residual_count); + } + } +} + +/** + * connection_broken() - Determine if the connection to the client is good + * @vscsi: Pointer to our adapter structure + * + * This function attempts to send a ping MAD to the client. If the call to + * queue the request returns H_CLOSED then the connection has been broken + * and the function returns TRUE. 
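connection_broken(), whose body follows, builds a single-element CRQ ping by overlaying struct viosrp_crq on a pair of u64s and handing the halves to the hypervisor as big-endian doublewords; ibmvscsis_send_init_message() further down uses the identical packing. Restated on its own with the MSG_HI/MSG_LOW indices this driver defines:

static long example_send_ping(struct scsi_info *vscsi)
{
	u64 buffer[2] = { 0, 0 };
	struct viosrp_crq *crq = (struct viosrp_crq *)buffer;

	crq->valid = VALID_CMD_RESP_EL;		/* element carries a message */
	crq->format = MESSAGE_IN_CRQ;
	crq->status = PING;

	/* h_send_crq() takes the 16-byte element as two BE doublewords */
	return h_send_crq(vscsi->dds.unit_id,
			  cpu_to_be64(buffer[MSG_HI]),
			  cpu_to_be64(buffer[MSG_LOW]));
}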
+ * + * EXECUTION ENVIRONMENT: + * Interrupt or Process environment + */ +static bool connection_broken(struct scsi_info *vscsi) +{ + struct viosrp_crq *crq; + u64 buffer[2] = { 0, 0 }; + long h_return_code; + bool rc = false; + + /* create a PING crq */ + crq = (struct viosrp_crq *)&buffer; + crq->valid = VALID_CMD_RESP_EL; + crq->format = MESSAGE_IN_CRQ; + crq->status = PING; + + h_return_code = h_send_crq(vscsi->dds.unit_id, + cpu_to_be64(buffer[MSG_HI]), + cpu_to_be64(buffer[MSG_LOW])); + + pr_debug("connection_broken: rc %ld\n", h_return_code); + + if (h_return_code == H_CLOSED) + rc = true; + + return rc; +} + +/** + * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue + * @vscsi: Pointer to our adapter structure + * + * This function calls h_free_crq, then frees the interrupt bit, etc. + * The caller must release the lock before invoking this function because + * of the time h_free_crq can take in PHYP. + * NOTE: the caller must make sure that state and/or flags will prevent + * the interrupt handler from scheduling work. + * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag; + * we can't do it here, because we don't have the lock + * + * EXECUTION ENVIRONMENT: + * Process level + */ +static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi) +{ + long qrc; + long rc = ADAPT_SUCCESS; + int ticks = 0; + + do { + qrc = h_free_crq(vscsi->dds.unit_id); + switch (qrc) { + case H_SUCCESS: + break; + + case H_HARDWARE: + case H_PARAMETER: + dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n", + qrc); + rc = ERROR; + break; + + case H_BUSY: + case H_LONG_BUSY_ORDER_1_MSEC: + /* msleep not good for small values */ + usleep_range(1000, 2000); + ticks += 1; + break; + case H_LONG_BUSY_ORDER_10_MSEC: + usleep_range(10000, 20000); + ticks += 10; + break; + case H_LONG_BUSY_ORDER_100_MSEC: + msleep(100); + ticks += 100; + break; + case H_LONG_BUSY_ORDER_1_SEC: + ssleep(1); + ticks += 1000; + break; + case H_LONG_BUSY_ORDER_10_SEC: + ssleep(10); + ticks += 10000; + break; + case H_LONG_BUSY_ORDER_100_SEC: + ssleep(100); + ticks += 100000; + break; + default: + dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n", + qrc); + rc = ERROR; + break; + } + + /* + * don't wait more than 300 seconds; + * ticks are in milliseconds, more or less + */ + if (ticks > 300000 && qrc != H_SUCCESS) { + rc = ERROR; + dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n"); + } + } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS); + + pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc); + + return rc; +} + +/** + * ibmvscsis_delete_client_info() - Helper function to Delete Client Info + * @vscsi: Pointer to our adapter structure + * @client_closed: True if client closed its queue + * + * Deletes information specific to the client when the client goes away + * + * EXECUTION ENVIRONMENT: + * Interrupt or Process + */ +static void ibmvscsis_delete_client_info(struct scsi_info *vscsi, + bool client_closed) +{ + vscsi->client_cap = 0; + + /* + * Some things we don't want to clear if we're closing the queue, + * because some clients don't resend the host handshake when they + * get a transport event. + */ + if (client_closed) + vscsi->client_data.os_type = 0; +} + +/** + * ibmvscsis_free_command_q() - Free Command Queue + * @vscsi: Pointer to our adapter structure + * + * This function calls unregister_command_q, then clears interrupts and + * any pending interrupt acknowledgments associated with the command q.
+ * It also clears memory if there is no error. + * + * PHYP did not meet the PAPR architecture, so we must give up the + * lock. This causes a timing hole regarding state change. To close the + * hole, this routine does accounting on any change that occurred during + * the time the lock is not held. + * NOTE: must give up and then reacquire the interrupt lock; the caller must + * make sure that state and/or flags will prevent the interrupt handler from + * scheduling work. + * + * EXECUTION ENVIRONMENT: + * Process level, interrupt lock is held + */ +static long ibmvscsis_free_command_q(struct scsi_info *vscsi) +{ + int bytes; + u32 flags_under_lock; + u16 state_under_lock; + long rc = ADAPT_SUCCESS; + + if (!(vscsi->flags & CRQ_CLOSED)) { + vio_disable_interrupts(vscsi->dma_dev); + + state_under_lock = vscsi->new_state; + flags_under_lock = vscsi->flags; + vscsi->phyp_acr_state = 0; + vscsi->phyp_acr_flags = 0; + + spin_unlock_bh(&vscsi->intr_lock); + rc = ibmvscsis_unregister_command_q(vscsi); + spin_lock_bh(&vscsi->intr_lock); + + if (state_under_lock != vscsi->new_state) + vscsi->phyp_acr_state = vscsi->new_state; + + vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags); + + if (rc == ADAPT_SUCCESS) { + bytes = vscsi->cmd_q.size * PAGE_SIZE; + memset(vscsi->cmd_q.base_addr, 0, bytes); + vscsi->cmd_q.index = 0; + vscsi->flags |= CRQ_CLOSED; + + ibmvscsis_delete_client_info(vscsi, false); + } + + pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n", + vscsi->flags, vscsi->state, vscsi->phyp_acr_flags, + vscsi->phyp_acr_state); + } + return rc; +} + +/** + * ibmvscsis_cmd_q_dequeue() - Get valid Command element + * @mask: Mask to use in case index wraps + * @current_index: Current index into command queue + * @base_addr: Pointer to start of command queue + * + * Returns a pointer to a valid command element or NULL, if the command + * queue is empty + * + * EXECUTION ENVIRONMENT: + * Interrupt environment, interrupt lock held + */ +static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask, + uint *current_index, + struct viosrp_crq *base_addr) +{ + struct viosrp_crq *ptr; + + ptr = base_addr + *current_index; + + if (ptr->valid) { + *current_index = (*current_index + 1) & mask; + dma_rmb(); + } else { + ptr = NULL; + } + + return ptr; +} + +/** + * ibmvscsis_send_init_message() - send initialize message to the client + * @vscsi: Pointer to our adapter structure + * @format: Which Init Message format to send + * + * EXECUTION ENVIRONMENT: + * Interrupt environment, interrupt lock held + */ +static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format) +{ + struct viosrp_crq *crq; + u64 buffer[2] = { 0, 0 }; + long rc; + + crq = (struct viosrp_crq *)&buffer; + crq->valid = VALID_INIT_MSG; + crq->format = format; + rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), + cpu_to_be64(buffer[MSG_LOW])); + + return rc; +} + +/** + * ibmvscsis_check_init_msg() - Check init message valid + * @vscsi: Pointer to our adapter structure + * @format: Pointer to return format of Init Message, if any. + * Set to UNUSED_FORMAT if no Init Message in queue. + * + * Checks if an initialize message was queued by the initiator + * after the queue was created and before the interrupt was enabled.
+ * + * EXECUTION ENVIRONMENT: + * Process level only, interrupt lock held + */ +static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) +{ + struct viosrp_crq *crq; + long rc = ADAPT_SUCCESS; + + crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index, + vscsi->cmd_q.base_addr); + if (!crq) { + *format = (uint)UNUSED_FORMAT; + } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) { + *format = (uint)INIT_MSG; + crq->valid = INVALIDATE_CMD_RESP_EL; + dma_rmb(); + + /* + * the caller has ensured no initialize message was + * sent after the queue was + * created so there should be no other message on the queue. + */ + crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, + &vscsi->cmd_q.index, + vscsi->cmd_q.base_addr); + if (crq) { + *format = (uint)(crq->format); + rc = ERROR; + crq->valid = INVALIDATE_CMD_RESP_EL; + dma_rmb(); + } + } else { + *format = (uint)(crq->format); + rc = ERROR; + crq->valid = INVALIDATE_CMD_RESP_EL; + dma_rmb(); + } + + return rc; +} + +/** + * ibmvscsis_establish_new_q() - Establish new CRQ queue + * @vscsi: Pointer to our adapter structure + * @new_state: New state being established after resetting the queue + * + * Must be called with interrupt lock held. + */ +static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state) +{ + long rc = ADAPT_SUCCESS; + uint format; + + vscsi->flags &= PRESERVE_FLAG_FIELDS; + vscsi->rsp_q_timer.timer_pops = 0; + vscsi->debit = 0; + vscsi->credit = 0; + + rc = vio_enable_interrupts(vscsi->dma_dev); + if (rc) { + pr_warn("reset_queue: failed to enable interrupts, rc %ld\n", + rc); + return rc; + } + + rc = ibmvscsis_check_init_msg(vscsi, &format); + if (rc) { + dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n", + rc); + return rc; + } + + if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) { + rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); + switch (rc) { + case H_SUCCESS: + case H_DROPPED: + case H_CLOSED: + rc = ADAPT_SUCCESS; + break; + + case H_PARAMETER: + case H_HARDWARE: + break; + + default: + vscsi->state = UNDEFINED; + rc = H_HARDWARE; + break; + } + } + + return rc; +} + +/** + * ibmvscsis_reset_queue() - Reset CRQ Queue + * @vscsi: Pointer to our adapter structure + * @new_state: New state to establish after resetting the queue + * + * This function calls h_free_q and then calls h_reg_q and does all + * of the bookkeeping to get us back to where we can communicate. + * + * Actually, we don't always call h_free_crq. A problem was discovered + * where one partition would close and reopen his queue, which would + * cause his partner to get a transport event, which would cause him to + * close and reopen his queue, which would cause the original partition + * to get a transport event, etc., etc. To prevent this, we don't + * actually close our queue if the client initiated the reset, (i.e. 
+ * either we got a transport event or we have detected that the client's + * queue is gone) + * + * EXECUTION ENVIRONMENT: + * Process environment, called with interrupt lock held + */ +static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state) +{ + int bytes; + long rc = ADAPT_SUCCESS; + + pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); + + /* don't reset, the client did it for us */ + if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { + vscsi->flags &= PRESERVE_FLAG_FIELDS; + vscsi->rsp_q_timer.timer_pops = 0; + vscsi->debit = 0; + vscsi->credit = 0; + vscsi->state = new_state; + vio_enable_interrupts(vscsi->dma_dev); + } else { + rc = ibmvscsis_free_command_q(vscsi); + if (rc == ADAPT_SUCCESS) { + vscsi->state = new_state; + + bytes = vscsi->cmd_q.size * PAGE_SIZE; + rc = h_reg_crq(vscsi->dds.unit_id, + vscsi->cmd_q.crq_token, bytes); + if (rc == H_CLOSED || rc == H_SUCCESS) { + rc = ibmvscsis_establish_new_q(vscsi, + new_state); + } + + if (rc != ADAPT_SUCCESS) { + pr_debug("reset_queue: reg_crq rc %ld\n", rc); + + vscsi->state = ERR_DISCONNECTED; + vscsi->flags |= RESPONSE_Q_DOWN; + ibmvscsis_free_command_q(vscsi); + } + } else { + vscsi->state = ERR_DISCONNECTED; + vscsi->flags |= RESPONSE_Q_DOWN; + } + } +} + +/** + * ibmvscsis_free_cmd_resources() - Free command resources + * @vscsi: Pointer to our adapter structure + * @cmd: Command which is no longer in use + * + * Must be called with interrupt lock held. + */ +static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + + switch (cmd->type) { + case TASK_MANAGEMENT: + case SCSI_CDB: + /* + * When the queue goes down this value is cleared, so it + * cannot be cleared in this general purpose function. + */ + if (vscsi->debit) + vscsi->debit -= 1; + break; + case ADAPTER_MAD: + vscsi->flags &= ~PROCESSING_MAD; + break; + case UNSET_TYPE: + break; + default: + dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", + cmd->type); + break; + } + + cmd->iue = NULL; + list_add_tail(&cmd->list, &vscsi->free_cmd); + srp_iu_put(iue); + + if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && + list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { + vscsi->flags &= ~WAIT_FOR_IDLE; + complete(&vscsi->wait_idle); + } +} + +/** + * ibmvscsis_disconnect() - Helper function to disconnect + * @work: Pointer to work_struct, gives access to our adapter structure + * + * An error has occurred or the driver received a Transport event, + * and the driver is requesting that the command queue be de-registered + * in a safe manner. If there is no outstanding I/O then we can stop the + * queue. If we are restarting the queue it will be reflected in the + * state of the adapter. + * + * EXECUTION ENVIRONMENT: + * Process environment + */ +static void ibmvscsis_disconnect(struct work_struct *work) +{ + struct scsi_info *vscsi = container_of(work, struct scsi_info, + proc_work); + u16 new_state; + bool wait_idle = false; + long rc = ADAPT_SUCCESS; + + spin_lock_bh(&vscsi->intr_lock); + new_state = vscsi->new_state; + vscsi->new_state = 0; + + pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags, + vscsi->state); + + /* + * check which state we are in and see if we + * should transition to the new state + */ + switch (vscsi->state) { + /* Should never be called while in this state. */ + case NO_QUEUE: + /* + * Can never transition from this state; + * ignore errors and log out.
+ */ + case UNCONFIGURING: + break; + + /* can transition from this state to UNCONFIGURING */ + case ERR_DISCONNECT: + if (new_state == UNCONFIGURING) + vscsi->state = new_state; + break; + + /* + * Can transition from this state to unconfiguring + * or err disconnect. + */ + case ERR_DISCONNECT_RECONNECT: + switch (new_state) { + case UNCONFIGURING: + case ERR_DISCONNECT: + vscsi->state = new_state; + break; + + case WAIT_IDLE: + break; + default: + break; + } + break; + + /* can transition from this state to UNCONFIGURING */ + case ERR_DISCONNECTED: + if (new_state == UNCONFIGURING) + vscsi->state = new_state; + break; + + /* + * If this is a transition into an error state, + * a client is attempting to establish a connection + * and has violated the RPA protocol. + * There can be nothing pending on the adapter although + * there can be requests in the command queue. + */ + case WAIT_ENABLED: + case PART_UP_WAIT_ENAB: + switch (new_state) { + case ERR_DISCONNECT: + vscsi->flags |= RESPONSE_Q_DOWN; + vscsi->state = new_state; + vscsi->flags &= ~(SCHEDULE_DISCONNECT | + DISCONNECT_SCHEDULED); + ibmvscsis_free_command_q(vscsi); + break; + case ERR_DISCONNECT_RECONNECT: + ibmvscsis_reset_queue(vscsi, WAIT_ENABLED); + break; + + /* should never happen */ + case WAIT_IDLE: + rc = ERROR; + dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n", + vscsi->state); + break; + } + break; + + case WAIT_IDLE: + switch (new_state) { + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + vscsi->state = new_state; + break; + } + break; + + /* + * Initiator has not done a successful srp login + * or has done a successful srp logout (adapter was not + * busy). In the first case there can be responses queued + * waiting for space on the initiator's response queue (MAD). + * In the second case the adapter is idle. Assume the worst case, + * i.e. the second case. + */ + case WAIT_CONNECTION: + case CONNECTED: + case SRP_PROCESSING: + wait_idle = true; + vscsi->state = new_state; + break; + + /* can transition from this state to UNCONFIGURING */ + case UNDEFINED: + if (new_state == UNCONFIGURING) + vscsi->state = new_state; + break; + default: + break; + } + + if (wait_idle) { + pr_debug("disconnect start wait, active %d, sched %d\n", + (int)list_empty(&vscsi->active_q), + (int)list_empty(&vscsi->schedule_q)); + if (!list_empty(&vscsi->active_q) || + !list_empty(&vscsi->schedule_q)) { + vscsi->flags |= WAIT_FOR_IDLE; + pr_debug("disconnect flags 0x%x\n", vscsi->flags); + /* + * This routine cannot be called with the interrupt + * lock held. + */ + spin_unlock_bh(&vscsi->intr_lock); + wait_for_completion(&vscsi->wait_idle); + spin_lock_bh(&vscsi->intr_lock); + } + pr_debug("disconnect stop wait\n"); + + ibmvscsis_adapter_idle(vscsi); + } + + spin_unlock_bh(&vscsi->intr_lock); +} + +/** + * ibmvscsis_post_disconnect() - Schedule the disconnect + * @vscsi: Pointer to our adapter structure + * @new_state: State to move to after disconnecting + * @flag_bits: Flags to turn on in adapter structure + * + * If it's already been scheduled, then see if we need to "upgrade" + * the new state (if the one passed in is more "severe" than the + * previous one).
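+ * + * Worked example (illustrative only; it mirrors the switch below and + * adds no new logic): if a disconnect was already posted with + * new_state == ERR_DISCONNECT_RECONNECT and this routine is called again + * with new_state == UNCONFIGURING, the pending vscsi->new_state is + * upgraded to UNCONFIGURING; in the reverse call order, the pending + * UNCONFIGURING state is left in place.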
+ * + * PRECONDITION: + * interrupt lock is held + */ +static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state, + uint flag_bits) +{ + uint state; + + /* check the validity of the new state */ + switch (new_state) { + case UNCONFIGURING: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case WAIT_IDLE: + break; + + default: + dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n", + new_state); + return; + } + + vscsi->flags |= flag_bits; + + pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n", + new_state, flag_bits, vscsi->flags, vscsi->state); + + if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) { + vscsi->flags |= SCHEDULE_DISCONNECT; + vscsi->new_state = new_state; + + INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect); + (void)queue_work(vscsi->work_q, &vscsi->proc_work); + } else { + if (vscsi->new_state) + state = vscsi->new_state; + else + state = vscsi->state; + + switch (state) { + case NO_QUEUE: + case UNCONFIGURING: + break; + + case ERR_DISCONNECTED: + case ERR_DISCONNECT: + case UNDEFINED: + if (new_state == UNCONFIGURING) + vscsi->new_state = new_state; + break; + + case ERR_DISCONNECT_RECONNECT: + switch (new_state) { + case UNCONFIGURING: + case ERR_DISCONNECT: + vscsi->new_state = new_state; + break; + default: + break; + } + break; + + case WAIT_ENABLED: + case PART_UP_WAIT_ENAB: + case WAIT_IDLE: + case WAIT_CONNECTION: + case CONNECTED: + case SRP_PROCESSING: + vscsi->new_state = new_state; + break; + + default: + break; + } + } + + pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", + vscsi->flags, vscsi->new_state); +} + +/** + * ibmvscsis_trans_event() - Handle a Transport Event + * @vscsi: Pointer to our adapter structure + * @crq: Pointer to CRQ entry containing the Transport Event + * + * Do the logic to close the I_T nexus. This function may not + * behave to specification. 
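+ * + * The formats accepted below are MIGRATED, PARTNER_FAILED and + * PARTNER_DEREGISTER; any other format is treated as an error. An + * illustrative CRQ element for this path (a sketch, using the enums in + * this file): crq->valid == VALID_TRANS_EVENT with, e.g., + * crq->format == PARTNER_FAILED when the client partition has failed.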
+ * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_trans_event(struct scsi_info *vscsi, + struct viosrp_crq *crq) +{ + long rc = ADAPT_SUCCESS; + + pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n", + (int)crq->format, vscsi->flags, vscsi->state); + + switch (crq->format) { + case MIGRATED: + case PARTNER_FAILED: + case PARTNER_DEREGISTER: + ibmvscsis_delete_client_info(vscsi, true); + break; + + default: + rc = ERROR; + dev_err(&vscsi->dev, "trans_event: invalid format %d\n", + (uint)crq->format); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, + RESPONSE_Q_DOWN); + break; + } + + if (rc == ADAPT_SUCCESS) { + switch (vscsi->state) { + case NO_QUEUE: + case ERR_DISCONNECTED: + case UNDEFINED: + break; + + case UNCONFIGURING: + vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); + break; + + case WAIT_ENABLED: + break; + + case WAIT_CONNECTION: + break; + + case CONNECTED: + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, + (RESPONSE_Q_DOWN | + TRANS_EVENT)); + break; + + case PART_UP_WAIT_ENAB: + vscsi->state = WAIT_ENABLED; + break; + + case SRP_PROCESSING: + if ((vscsi->debit > 0) || + !list_empty(&vscsi->schedule_q) || + !list_empty(&vscsi->waiting_rsp) || + !list_empty(&vscsi->active_q)) { + pr_debug("debit %d, sched %d, wait %d, active %d\n", + vscsi->debit, + (int)list_empty(&vscsi->schedule_q), + (int)list_empty(&vscsi->waiting_rsp), + (int)list_empty(&vscsi->active_q)); + pr_warn("connection lost with outstanding work\n"); + } else { + pr_debug("trans_event: SRP Processing, but no outstanding work\n"); + } + + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, + (RESPONSE_Q_DOWN | + TRANS_EVENT)); + break; + + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case WAIT_IDLE: + vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); + break; + } + } + + rc = vscsi->flags & SCHEDULE_DISCONNECT; + + pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", + vscsi->flags, vscsi->state, rc); + + return rc; +} + +/** + * ibmvscsis_poll_cmd_q() - Poll Command Queue + * @vscsi: Pointer to our adapter structure + * + * Called to handle command elements that may have arrived while + * interrupts were disabled. + * + * EXECUTION ENVIRONMENT: + * intr_lock must be held + */ +static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi) +{ + struct viosrp_crq *crq; + long rc; + bool ack = true; + volatile u8 valid; + + pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n", + vscsi->flags, vscsi->state, vscsi->cmd_q.index); + + rc = vscsi->flags & SCHEDULE_DISCONNECT; + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; + valid = crq->valid; + dma_rmb(); + + while (valid) { +poll_work: + vscsi->cmd_q.index = + (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask; + + if (!rc) { + rc = ibmvscsis_parse_command(vscsi, crq); + } else { + if ((uint)crq->valid == VALID_TRANS_EVENT) { + /* + * must service the transport layer events even + * in an error state; don't break out until all + * the consecutive transport events have been + * processed + */ + rc = ibmvscsis_trans_event(vscsi, crq); + } else if (vscsi->flags & TRANS_EVENT) { + /* + * if a transport event has occurred, leave + * everything but transport events on the queue + */ + pr_debug("poll_cmd_q, ignoring\n"); + + /* + * need to decrement the queue index so we can + * look at the element again + */ + if (vscsi->cmd_q.index) + vscsi->cmd_q.index -= 1; + else + /* + * index is at 0, it just wrapped;
+ * have it index last element in q + */ + vscsi->cmd_q.index = vscsi->cmd_q.mask; + break; + } + } + + crq->valid = INVALIDATE_CMD_RESP_EL; + + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; + valid = crq->valid; + dma_rmb(); + } + + if (!rc) { + if (ack) { + vio_enable_interrupts(vscsi->dma_dev); + ack = false; + pr_debug("poll_cmd_q, reenabling interrupts\n"); + } + valid = crq->valid; + dma_rmb(); + if (valid) + goto poll_work; + } + + pr_debug("Leaving poll_cmd_q: rc %ld\n", rc); +} + +/** + * ibmvscsis_free_cmd_qs() - Free elements in queue + * @vscsi: Pointer to our adapter structure + * + * Free all of the elements on all queues that are waiting for + * whatever reason. + * + * PRECONDITION: + * Called with interrupt lock held + */ +static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi) +{ + struct ibmvscsis_cmd *cmd, *nxt; + + pr_debug("free_cmd_qs: waiting_rsp empty %d, timer started %d\n", + (int)list_empty(&vscsi->waiting_rsp), + vscsi->rsp_q_timer.started); + + list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) { + list_del(&cmd->list); + ibmvscsis_free_cmd_resources(vscsi, cmd); + } +} + +/** + * ibmvscsis_get_free_cmd() - Get free command from list + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. + */ +static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) +{ + struct ibmvscsis_cmd *cmd = NULL; + struct iu_entry *iue; + + iue = srp_iu_get(&vscsi->target); + if (iue) { + cmd = list_first_entry_or_null(&vscsi->free_cmd, + struct ibmvscsis_cmd, list); + if (cmd) { + list_del(&cmd->list); + cmd->iue = iue; + cmd->type = UNSET_TYPE; + memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd)); + } else { + srp_iu_put(iue); + } + } + + return cmd; +} + +/** + * ibmvscsis_adapter_idle() - Helper function to handle idle adapter + * @vscsi: Pointer to our adapter structure + * + * This function is called when the adapter is idle and the driver + * is attempting to clear an error condition. + * The adapter is considered busy if any of its cmd queues + * are non-empty. This function can be invoked + * from the off-level disconnect function.
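+ * + * Summary of the state handling below (an illustrative aid, not new + * logic): + * + * ERR_DISCONNECT_RECONNECT -> reset the queue, go to WAIT_CONNECTION + * ERR_DISCONNECT -> free the queue, go to ERR_DISCONNECTED + * WAIT_IDLE -> go to WAIT_CONNECTION on a transport event, + * otherwise back to CONNECTED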
+ * + * EXECUTION ENVIRONMENT: + * Process environment called with interrupt lock held + */ +static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) +{ + int free_qs = false; + + pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags, + vscsi->state); + + /* Only need to free qs if we're disconnecting from client */ + if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT) + free_qs = true; + + switch (vscsi->state) { + case ERR_DISCONNECT_RECONNECT: + ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION); + pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags); + break; + + case ERR_DISCONNECT: + ibmvscsis_free_command_q(vscsi); + vscsi->flags &= ~DISCONNECT_SCHEDULED; + vscsi->flags |= RESPONSE_Q_DOWN; + vscsi->state = ERR_DISCONNECTED; + pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + break; + + case WAIT_IDLE: + vscsi->rsp_q_timer.timer_pops = 0; + vscsi->debit = 0; + vscsi->credit = 0; + if (vscsi->flags & TRANS_EVENT) { + vscsi->state = WAIT_CONNECTION; + vscsi->flags &= PRESERVE_FLAG_FIELDS; + } else { + vscsi->state = CONNECTED; + vscsi->flags &= ~DISCONNECT_SCHEDULED; + } + + pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + ibmvscsis_poll_cmd_q(vscsi); + break; + + case ERR_DISCONNECTED: + vscsi->flags &= ~DISCONNECT_SCHEDULED; + pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n", + vscsi->flags, vscsi->state); + break; + + default: + dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n", + vscsi->state); + break; + } + + if (free_qs) + ibmvscsis_free_cmd_qs(vscsi); + + /* + * There is a timing window where we could lose a disconnect request. + * The known path to this window occurs during the DISCONNECT_RECONNECT + * case above: reset_queue calls free_command_q, which will release the + * interrupt lock. During that time, a new post_disconnect call can be + * made with a "more severe" state (DISCONNECT or UNCONFIGURING). + * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect + * will only set the new_state. Now free_command_q reacquires the intr + * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_ + * FIELDS), and the disconnect is lost. This is particularly bad when + * the new disconnect was for UNCONFIGURING, since the unconfigure hangs + * forever. 
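+ * An illustrative interleaving of that window (a sketch): + * + * CPU A: adapter_idle -> reset_queue -> free_command_q, + * which drops intr_lock around h_free_crq + * CPU B: ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0) only sets + * new_state, because DISCONNECT_SCHEDULED is still set + * CPU A: reacquires intr_lock and clears DISCONNECT_SCHEDULED, + * losing the disconnect. + *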
+ * The fix is that free_command_q records the acr state and acr flags if + * there is a change while the lock is dropped. + * Note that free_command_q writes to this state: it clears the fields + * before releasing the lock. Different callers invoke free_command_q at + * different times, so don't initialize the fields above. + */ + if (vscsi->phyp_acr_state != 0) { + /* + * set any bits in flags that may have been cleared by + * a call to free command queue in switch statement + * or reset queue + */ + vscsi->flags |= vscsi->phyp_acr_flags; + ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0); + vscsi->phyp_acr_state = 0; + vscsi->phyp_acr_flags = 0; + + pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n", + vscsi->flags, vscsi->state, vscsi->phyp_acr_flags, + vscsi->phyp_acr_state); + } + + pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n", + vscsi->flags, vscsi->state, vscsi->new_state); +} + +/** + * ibmvscsis_copy_crq_packet() - Copy CRQ Packet + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to command element to use to process the request + * @crq: Pointer to CRQ entry containing the request + * + * Copy the SRP information unit from the hosted + * partition using remote DMA + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, + struct viosrp_crq *crq) +{ + struct iu_entry *iue = cmd->iue; + long rc = 0; + u16 len; + + len = be16_to_cpu(crq->IU_length); + if ((len > SRP_MAX_IU_LEN) || (len == 0)) { + dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + return SRP_VIOLATION; + } + + rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(crq->IU_data_ptr), + vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma); + + switch (rc) { + case H_SUCCESS: + cmd->init_time = mftb(); + iue->remote_token = crq->IU_data_ptr; + iue->iu_len = len; + pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n", + be64_to_cpu(crq->IU_data_ptr), cmd->init_time); + break; + case H_PERMISSION: + if (connection_broken(vscsi)) + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + (RESPONSE_Q_DOWN | + CLIENT_FAILED)); + else + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + + dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n", + rc); + break; + case H_DEST_PARM: + case H_SOURCE_PARM: + default: + dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_adapter_info - Service an Adapter Info MAnagement Data gram + * @vscsi: Pointer to our adapter structure + * @iue: Information Unit containing the Adapter Info MAD request + * + * EXECUTION ENVIRONMENT: + * Interrupt, adapter lock is held + */ +static long ibmvscsis_adapter_info(struct scsi_info *vscsi, + struct iu_entry *iue) +{ + struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info; + struct mad_adapter_info_data *info; + uint flag_bits = 0; + dma_addr_t token; + long rc; + + mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS); + + if (be16_to_cpu(mad->common.length) > sizeof(*info)) { + mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); + return 0; + } + + info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, + GFP_KERNEL); + if (!info) { + dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", + iue->target); + mad->common.status =
cpu_to_be16(VIOSRP_MAD_FAILED); + return 0; + } + + /* Get remote info */ + rc = h_copy_rdma(be16_to_cpu(mad->common.length), + vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(mad->buffer), + vscsi->dds.window[LOCAL].liobn, token); + + if (rc != H_SUCCESS) { + if (rc == H_PERMISSION) { + if (connection_broken(vscsi)) + flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); + } + pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n", + rc); + pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n", + be64_to_cpu(mad->buffer), vscsi->flags, flag_bits); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, + flag_bits); + goto free_dma; + } + + /* + * Copy client info, but ignore partition number, which we + * already got from phyp - unless we failed to get it from + * phyp (e.g. if we're running on a p5 system). + */ + if (vscsi->client_data.partition_number == 0) + vscsi->client_data.partition_number = + be32_to_cpu(info->partition_number); + strncpy(vscsi->client_data.srp_version, info->srp_version, + sizeof(vscsi->client_data.srp_version)); + strncpy(vscsi->client_data.partition_name, info->partition_name, + sizeof(vscsi->client_data.partition_name)); + vscsi->client_data.mad_version = be32_to_cpu(info->mad_version); + vscsi->client_data.os_type = be32_to_cpu(info->os_type); + + /* Copy our info */ + strncpy(info->srp_version, SRP_VERSION, + sizeof(info->srp_version)); + strncpy(info->partition_name, vscsi->dds.partition_name, + sizeof(info->partition_name)); + info->partition_number = cpu_to_be32(vscsi->dds.partition_num); + info->mad_version = cpu_to_be32(MAD_VERSION_1); + info->os_type = cpu_to_be32(LINUX); + memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); + info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); + + dma_wmb(); + rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, + token, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(mad->buffer)); + switch (rc) { + case H_SUCCESS: + break; + + case H_SOURCE_PARM: + case H_DEST_PARM: + case H_PERMISSION: + if (connection_broken(vscsi)) + flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); + default: + dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + flag_bits); + break; + } + +free_dma: + dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token); + pr_debug("Leaving adapter_info, rc %ld\n", rc); + + return rc; +} + +/** + * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram + * @vscsi: Pointer to our adapter structure + * @iue: Information Unit containing the Capabilities MAD request + * + * NOTE: if you return an error from this routine you must be + * disconnecting or you will cause a hang + * + * EXECUTION ENVIRONMENT: + * Interrupt called with adapter lock held + */ +static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue) +{ + struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities; + struct capabilities *cap; + struct mad_capability_common *common; + dma_addr_t token; + u16 olen, len, status, min_len, cap_len; + u32 flag; + uint flag_bits = 0; + long rc = 0; + + olen = be16_to_cpu(mad->common.length); + /* + * struct capabilities hardcodes a couple capabilities after the + * header, but the capabilities can actually be in any order. 
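+ * An illustrative buffer layout (a sketch; entry types and counts are + * whatever the client sends): the struct capabilities header comes + * first, followed by a sequence of entries, each beginning with a + * struct mad_capability_common whose length field gives the size of + * that entry. The loop below walks the entries using those lengths.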
+ */ + min_len = offsetof(struct capabilities, migration); + if ((olen < min_len) || (olen > PAGE_SIZE)) { + pr_warn("cap_mad: invalid len %d\n", olen); + mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); + return 0; + } + + cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, + GFP_KERNEL); + if (!cap) { + dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", + iue->target); + mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); + return 0; + } + rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(mad->buffer), + vscsi->dds.window[LOCAL].liobn, token); + if (rc == H_SUCCESS) { + strncpy(cap->name, dev_name(&vscsi->dma_dev->dev), + SRP_MAX_LOC_LEN); + + len = olen - min_len; + status = VIOSRP_MAD_SUCCESS; + common = (struct mad_capability_common *)&cap->migration; + + while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) { + pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n", + len, be32_to_cpu(common->cap_type), + be16_to_cpu(common->length)); + + cap_len = be16_to_cpu(common->length); + if (cap_len > len) { + dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n"); + status = VIOSRP_MAD_FAILED; + break; + } + + if (cap_len == 0) { + dev_err(&vscsi->dev, "cap_mad: cap len is 0\n"); + status = VIOSRP_MAD_FAILED; + break; + } + + switch (common->cap_type) { + default: + pr_debug("cap_mad: unsupported capability\n"); + common->server_support = 0; + flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED); + cap->flags &= ~flag; + break; + } + + len = len - cap_len; + common = (struct mad_capability_common *) + ((char *)common + cap_len); + } + + mad->common.status = cpu_to_be16(status); + + dma_wmb(); + rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token, + vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(mad->buffer)); + + if (rc != H_SUCCESS) { + pr_debug("cap_mad: failed to copy to client, rc %ld\n", + rc); + + if (rc == H_PERMISSION) { + if (connection_broken(vscsi)) + flag_bits = (RESPONSE_Q_DOWN | + CLIENT_FAILED); + } + + pr_warn("cap_mad: error copying data to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + flag_bits); + } + } + + dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token); + + pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n", + rc, vscsi->client_cap); + + return rc; +} + +/** + * ibmvscsis_process_mad() - Service a MAnagement Data gram + * @vscsi: Pointer to our adapter structure + * @iue: Information Unit containing the MAD request + * + * Must be called with interrupt lock held. 
+ */ +static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue) +{ + struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad; + struct viosrp_empty_iu *empty; + long rc = ADAPT_SUCCESS; + + switch (be32_to_cpu(mad->type)) { + case VIOSRP_EMPTY_IU_TYPE: + empty = &vio_iu(iue)->mad.empty_iu; + vscsi->empty_iu_id = be64_to_cpu(empty->buffer); + vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag); + mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); + break; + case VIOSRP_ADAPTER_INFO_TYPE: + rc = ibmvscsis_adapter_info(vscsi, iue); + break; + case VIOSRP_CAPABILITIES_TYPE: + rc = ibmvscsis_cap_mad(vscsi, iue); + break; + case VIOSRP_ENABLE_FAST_FAIL: + if (vscsi->state == CONNECTED) { + vscsi->fast_fail = true; + mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); + } else { + pr_warn("fast fail mad sent after login\n"); + mad->status = cpu_to_be16(VIOSRP_MAD_FAILED); + } + break; + default: + mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED); + break; + } + + return rc; +} + +/** + * srp_snd_msg_failed() - Handle an error when sending a response + * @vscsi: Pointer to our adapter structure + * @rc: The return code from the h_send_crq command + * + * Must be called with interrupt lock held. + */ +static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc) +{ + ktime_t kt; + + if (rc != H_DROPPED) { + ibmvscsis_free_cmd_qs(vscsi); + + if (rc == H_CLOSED) + vscsi->flags |= CLIENT_FAILED; + + /* don't flag the same problem multiple times */ + if (!(vscsi->flags & RESPONSE_Q_DOWN)) { + vscsi->flags |= RESPONSE_Q_DOWN; + if (!(vscsi->state & (ERR_DISCONNECT | + ERR_DISCONNECT_RECONNECT | + ERR_DISCONNECTED | UNDEFINED))) { + dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n", + vscsi->state, vscsi->flags, rc); + } + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + } + return; + } + + /* + * The response queue is full. + * If the server is processing SRP requests, i.e. + * the client has successfully done an + * SRP_LOGIN, then it will wait forever for room in + * the queue. However if the system admin + * is attempting to unconfigure the server then one + * or more children will be in a state where + * they are being removed. So if there is even one + * child being removed then the driver assumes + * the system admin is attempting to break the + * connection with the client and MAX_TIMER_POPS + * is honored. + */ + if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) || + (vscsi->state == SRP_PROCESSING)) { + pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n", + vscsi->flags, (int)vscsi->rsp_q_timer.started, + vscsi->rsp_q_timer.timer_pops); + + /* + * Check if the timer is running; if it + * is not then start it up. + */ + if (!vscsi->rsp_q_timer.started) { + if (vscsi->rsp_q_timer.timer_pops < + MAX_TIMER_POPS) { + kt = ktime_set(0, WAIT_NANO_SECONDS); + } else { + /* + * slide the timeslice if the maximum + * timer pops have already happened + */ + kt = ktime_set(WAIT_SECONDS, 0); + } + + vscsi->rsp_q_timer.started = true; + hrtimer_start(&vscsi->rsp_q_timer.timer, kt, + HRTIMER_MODE_REL); + } + } else { + /* + * TBD: Do we need to worry about this? Need to get + * remove working. 
+ */ + /* + * we have waited a long time and it appears the system admin + * is bringing this driver down + */ + vscsi->flags |= RESPONSE_Q_DOWN; + ibmvscsis_free_cmd_qs(vscsi); + /* + * if the driver is already attempting to disconnect + * from the client and has already logged an error, + * trace this event but don't put it in the error log + */ + if (!(vscsi->state & (ERR_DISCONNECT | + ERR_DISCONNECT_RECONNECT | + ERR_DISCONNECTED | UNDEFINED))) { + dev_err(&vscsi->dev, "client crq full too long\n"); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + 0); + } + } +} + +/** + * ibmvscsis_send_messages() - Send a Response + * @vscsi: Pointer to our adapter structure + * + * Send a response, first checking the waiting queue. Responses are + * sent in the order they are received. If the response cannot be sent, + * because the client queue is full, it stays on the waiting queue. + * + * PRECONDITION: + * Called with interrupt lock held + */ +static void ibmvscsis_send_messages(struct scsi_info *vscsi) +{ + u64 msg_hi = 0; + /* note: do not attempt to access the IU_data_ptr with this pointer; + * it is not valid + */ + struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi; + struct ibmvscsis_cmd *cmd, *nxt; + struct iu_entry *iue; + long rc = ADAPT_SUCCESS; + + if (!(vscsi->flags & RESPONSE_Q_DOWN)) { + list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) { + pr_debug("send_messages cmd %p\n", cmd); + + iue = cmd->iue; + + crq->valid = VALID_CMD_RESP_EL; + crq->format = cmd->rsp.format; + + if (cmd->flags & CMD_FAST_FAIL) + crq->status = VIOSRP_ADAPTER_FAIL; + + crq->IU_length = cpu_to_be16(cmd->rsp.len); + + rc = h_send_crq(vscsi->dma_dev->unit_address, + be64_to_cpu(msg_hi), + be64_to_cpu(cmd->rsp.tag)); + + pr_debug("send_messages: tag 0x%llx, rc %ld\n", + be64_to_cpu(cmd->rsp.tag), rc); + + /* if all ok, free up the command element resources */ + if (rc == H_SUCCESS) { + /* some movement has occurred */ + vscsi->rsp_q_timer.timer_pops = 0; + list_del(&cmd->list); + + ibmvscsis_free_cmd_resources(vscsi, cmd); + } else { + srp_snd_msg_failed(vscsi, rc); + break; + } + } + + if (!rc) { + /* + * The timer could pop with the queue empty. If + * this happens, rc will always indicate a + * success; clear the pop count. + */ + vscsi->rsp_q_timer.timer_pops = 0; + } + } else { + ibmvscsis_free_cmd_qs(vscsi); + } +} + +/* Called with intr lock held */ +static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, + struct viosrp_crq *crq) +{ + struct iu_entry *iue = cmd->iue; + struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad; + uint flag_bits = 0; + long rc; + + dma_wmb(); + rc = h_copy_rdma(sizeof(struct mad_common), + vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma, + vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(crq->IU_data_ptr)); + if (!rc) { + cmd->rsp.format = VIOSRP_MAD_FORMAT; + cmd->rsp.len = sizeof(struct mad_common); + cmd->rsp.tag = mad->tag; + list_add_tail(&cmd->list, &vscsi->waiting_rsp); + ibmvscsis_send_messages(vscsi); + } else { + pr_debug("Error sending mad response, rc %ld\n", rc); + if (rc == H_PERMISSION) { + if (connection_broken(vscsi)) + flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); + } + dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n", + rc); + + ibmvscsis_free_cmd_resources(vscsi, cmd); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, + flag_bits); + } +} + +/** + * ibmvscsis_mad() - Service a MAnagement Data gram.
+ * @vscsi: Pointer to our adapter structure + * @crq: Pointer to the CRQ entry containing the MAD request + * + * EXECUTION ENVIRONMENT: + * Interrupt called with adapter lock held + */ +static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) +{ + struct iu_entry *iue; + struct ibmvscsis_cmd *cmd; + struct mad_common *mad; + long rc = ADAPT_SUCCESS; + + switch (vscsi->state) { + /* + * We have not exchanged Init Msgs yet, so this MAD was sent + * before the last Transport Event; client will not be + * expecting a response. + */ + case WAIT_CONNECTION: + pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n", + vscsi->flags); + return ADAPT_SUCCESS; + + case SRP_PROCESSING: + case CONNECTED: + break; + + /* + * We should never get here while we're in these states. + * Just log an error and get out. + */ + case UNCONFIGURING: + case WAIT_IDLE: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + default: + dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n", + vscsi->state); + return ADAPT_SUCCESS; + } + + cmd = ibmvscsis_get_free_cmd(vscsi); + if (!cmd) { + dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n", + vscsi->debit); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + return ERROR; + } + iue = cmd->iue; + cmd->type = ADAPTER_MAD; + + rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq); + if (!rc) { + mad = (struct mad_common *)&vio_iu(iue)->mad; + + pr_debug("mad: type %d\n", be32_to_cpu(mad->type)); + + if (be16_to_cpu(mad->length) < 0) { + dev_err(&vscsi->dev, "mad: length is < 0\n"); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + rc = SRP_VIOLATION; + } else { + rc = ibmvscsis_process_mad(vscsi, iue); + } + + pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status), + rc); + + if (!rc) + ibmvscsis_send_mad_resp(vscsi, cmd, crq); + } else { + ibmvscsis_free_cmd_resources(vscsi, cmd); + } + + pr_debug("Leaving mad, rc %ld\n", rc); + return rc; +} + +/** + * ibmvscsis_login_rsp() - Create/copy a login response notice to the client + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to the command for the SRP Login request + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_login_rsp(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp; + struct format_code *fmt; + uint flag_bits = 0; + long rc = ADAPT_SUCCESS; + + memset(rsp, 0, sizeof(struct srp_login_rsp)); + + rsp->opcode = SRP_LOGIN_RSP; + rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit); + rsp->tag = cmd->rsp.tag; + rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); + rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); + fmt = (struct format_code *)&rsp->buf_fmt; + fmt->buffers = SUPPORTED_FORMATS; + vscsi->credit = 0; + + cmd->rsp.len = sizeof(struct srp_login_rsp); + + dma_wmb(); + rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn, + iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(iue->remote_token)); + + switch (rc) { + case H_SUCCESS: + break; + + case H_PERMISSION: + if (connection_broken(vscsi)) + flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; + dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, + flag_bits); + break; + case H_SOURCE_PARM: + case H_DEST_PARM: + default: + dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n", + rc); + 
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to the command for the SRP Login request + * @reason: The reason the SRP Login is being rejected, per SRP protocol + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, u32 reason) +{ + struct iu_entry *iue = cmd->iue; + struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej; + struct format_code *fmt; + uint flag_bits = 0; + long rc = ADAPT_SUCCESS; + + memset(rej, 0, sizeof(*rej)); + + rej->opcode = SRP_LOGIN_REJ; + rej->reason = cpu_to_be32(reason); + rej->tag = cmd->rsp.tag; + fmt = (struct format_code *)&rej->buf_fmt; + fmt->buffers = SUPPORTED_FORMATS; + + cmd->rsp.len = sizeof(*rej); + + dma_wmb(); + rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn, + iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(iue->remote_token)); + + switch (rc) { + case H_SUCCESS: + break; + case H_PERMISSION: + if (connection_broken(vscsi)) + flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; + dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, + flag_bits); + break; + case H_SOURCE_PARM: + case H_DEST_PARM: + default: + dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport) +{ + char *name = tport->tport_name; + struct ibmvscsis_nexus *nexus; + int rc; + + if (tport->ibmv_nexus) { + pr_debug("tport->ibmv_nexus already exists\n"); + return 0; + } + + nexus = kzalloc(sizeof(*nexus), GFP_KERNEL); + if (!nexus) { + pr_err("Unable to allocate struct ibmvscsis_nexus\n"); + return -ENOMEM; + } + + nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0, + TARGET_PROT_NORMAL, name, nexus, + NULL); + if (IS_ERR(nexus->se_sess)) { + rc = PTR_ERR(nexus->se_sess); + goto transport_init_fail; + } + + tport->ibmv_nexus = nexus; + + return 0; + +transport_init_fail: + kfree(nexus); + return rc; +} + +static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport) +{ + struct se_session *se_sess; + struct ibmvscsis_nexus *nexus; + + nexus = tport->ibmv_nexus; + if (!nexus) + return -ENODEV; + + se_sess = nexus->se_sess; + if (!se_sess) + return -ENODEV; + + /* + * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port + */ + transport_deregister_session(se_sess); + tport->ibmv_nexus = NULL; + kfree(nexus); + + return 0; +} + +/** + * ibmvscsis_srp_login() - Process an SRP Login Request + * @vscsi: Pointer to our adapter structure + * @cmd: Command element to use to process the SRP Login request + * @crq: Pointer to CRQ entry containing the SRP Login request + * + * EXECUTION ENVIRONMENT: + * Interrupt, called with interrupt lock held + */ +static long ibmvscsis_srp_login(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, + struct viosrp_crq *crq) +{ + struct iu_entry *iue = cmd->iue; + struct srp_login_req *req = &vio_iu(iue)->srp.login_req; + struct port_id { + __be64 id_extension; + __be64 io_guid; + } *iport, *tport; + struct format_code *fmt; + u32 reason = 0x0; + long rc = ADAPT_SUCCESS; + + iport = (struct port_id *)req->initiator_port_id; + tport = (struct port_id 
*)req->target_port_id; + fmt = (struct format_code *)&req->req_buf_fmt; + if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN) + reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE; + else if (be32_to_cpu(req->req_it_iu_len) < 64) + reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL; + else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) || + (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1))) + reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL; + else if (req->req_flags & SRP_MULTICHAN_MULTI) + reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED; + else if (fmt->buffers & (~SUPPORTED_FORMATS)) + reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT; + else if ((fmt->buffers | SUPPORTED_FORMATS) == 0) + reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT; + + if (vscsi->state == SRP_PROCESSING) + reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED; + + rc = ibmvscsis_make_nexus(&vscsi->tport); + if (rc) + reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL; + + cmd->rsp.format = VIOSRP_SRP_FORMAT; + cmd->rsp.tag = req->tag; + + pr_debug("srp_login: reason 0x%x\n", reason); + + if (reason) + rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason); + else + rc = ibmvscsis_login_rsp(vscsi, cmd); + + if (!rc) { + if (!reason) + vscsi->state = SRP_PROCESSING; + + list_add_tail(&cmd->list, &vscsi->waiting_rsp); + ibmvscsis_send_messages(vscsi); + } else { + ibmvscsis_free_cmd_resources(vscsi, cmd); + } + + pr_debug("Leaving srp_login, rc %ld\n", rc); + return rc; +} + +/** + * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus + * @vscsi: Pointer to our adapter structure + * @cmd: Command element to use to process the Implicit Logout request + * @crq: Pointer to CRQ entry containing the Implicit Logout request + * + * Do the logic to close the I_T nexus. This function may not + * behave to specification. 
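+ * + * On the idle path below (an illustrative summary of existing logic), + * the response is queued and sent, then a WAIT_IDLE disconnect is + * posted: + * + * list_add_tail(&cmd->list, &vscsi->waiting_rsp); + * ibmvscsis_send_messages(vscsi); + * ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);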
+ * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, + struct viosrp_crq *crq) +{ + struct iu_entry *iue = cmd->iue; + struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout; + long rc = ADAPT_SUCCESS; + + if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) || + !list_empty(&vscsi->waiting_rsp)) { + dev_err(&vscsi->dev, "i_logout: outstanding work\n"); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + } else { + cmd->rsp.format = SRP_FORMAT; + cmd->rsp.tag = log_out->tag; + cmd->rsp.len = sizeof(struct mad_common); + list_add_tail(&cmd->list, &vscsi->waiting_rsp); + ibmvscsis_send_messages(vscsi); + + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0); + } + + return rc; +} + +/* Called with intr lock held */ +static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq) +{ + struct ibmvscsis_cmd *cmd; + struct iu_entry *iue; + struct srp_cmd *srp; + struct srp_tsk_mgmt *tsk; + long rc; + + if (vscsi->request_limit - vscsi->debit <= 0) { + /* Client has exceeded request limit */ + dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n", + vscsi->request_limit, vscsi->debit); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + return; + } + + cmd = ibmvscsis_get_free_cmd(vscsi); + if (!cmd) { + dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n", + vscsi->debit); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + return; + } + iue = cmd->iue; + srp = &vio_iu(iue)->srp.cmd; + + rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq); + if (rc) { + ibmvscsis_free_cmd_resources(vscsi, cmd); + return; + } + + if (vscsi->state == SRP_PROCESSING) { + switch (srp->opcode) { + case SRP_LOGIN_REQ: + rc = ibmvscsis_srp_login(vscsi, cmd, crq); + break; + + case SRP_TSK_MGMT: + tsk = &vio_iu(iue)->srp.tsk_mgmt; + pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, + tsk->tag); + cmd->rsp.tag = tsk->tag; + vscsi->debit += 1; + cmd->type = TASK_MANAGEMENT; + list_add_tail(&cmd->list, &vscsi->schedule_q); + queue_work(vscsi->work_q, &cmd->work); + break; + + case SRP_CMD: + pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, + srp->tag); + cmd->rsp.tag = srp->tag; + vscsi->debit += 1; + cmd->type = SCSI_CDB; + /* + * We want to keep track of work waiting for + * the workqueue. + */ + list_add_tail(&cmd->list, &vscsi->schedule_q); + queue_work(vscsi->work_q, &cmd->work); + break; + + case SRP_I_LOGOUT: + rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); + break; + + case SRP_CRED_RSP: + case SRP_AER_RSP: + default: + ibmvscsis_free_cmd_resources(vscsi, cmd); + dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", + (uint)srp->opcode); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + break; + } + } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { + rc = ibmvscsis_srp_login(vscsi, cmd, crq); + } else { + ibmvscsis_free_cmd_resources(vscsi, cmd); + dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", + vscsi->state); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + } +} + +/** + * ibmvscsis_ping_response() - Respond to a ping request + * @vscsi: Pointer to our adapter structure + * + * Let the client know that the server is alive and waiting on + * its native I/O stack. + * If any type of error occurs from the call to queue a ping + * response then the client is either not accepting or receiving + * interrupts. Disconnect with an error. 
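+ * + * The response is a single CRQ element (a sketch of what is built + * below): + * + * crq->valid = VALID_CMD_RESP_EL; + * crq->format = MESSAGE_IN_CRQ; + * crq->status = PING_RESPONSE;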
+ * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_ping_response(struct scsi_info *vscsi) +{ + struct viosrp_crq *crq; + u64 buffer[2] = { 0, 0 }; + long rc; + + crq = (struct viosrp_crq *)&buffer; + crq->valid = VALID_CMD_RESP_EL; + crq->format = (u8)MESSAGE_IN_CRQ; + crq->status = PING_RESPONSE; + + rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), + cpu_to_be64(buffer[MSG_LOW])); + + switch (rc) { + case H_SUCCESS: + break; + case H_CLOSED: + vscsi->flags |= CLIENT_FAILED; + case H_DROPPED: + vscsi->flags |= RESPONSE_Q_DOWN; + case H_REMOTE_PARM: + dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + default: + dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. + */ +static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) +{ + long rc = ADAPT_SUCCESS; + + switch (vscsi->state) { + case NO_QUEUE: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case ERR_DISCONNECTED: + case UNCONFIGURING: + case UNDEFINED: + rc = ERROR; + break; + + case WAIT_CONNECTION: + vscsi->state = CONNECTED; + break; + + case WAIT_IDLE: + case SRP_PROCESSING: + case CONNECTED: + case WAIT_ENABLED: + case PART_UP_WAIT_ENAB: + default: + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", + vscsi->state); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_handle_init_msg() - Respond to an Init Message + * @vscsi: Pointer to our adapter structure + * + * Must be called with interrupt lock held. 
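+ * + * Illustrative handshake (a sketch; INIT_MSG arrives from the client, + * INIT_COMPLETE_MSG is our reply sent with h_send_crq): + * + * client: INIT_MSG received -> state WAIT_CONNECTION + * server: INIT_COMPLETE_MSG sent -> state CONNECTED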
+ */ +static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) +{ + long rc = ADAPT_SUCCESS; + + switch (vscsi->state) { + case WAIT_ENABLED: + vscsi->state = PART_UP_WAIT_ENAB; + break; + + case WAIT_CONNECTION: + rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); + switch (rc) { + case H_SUCCESS: + vscsi->state = CONNECTED; + break; + + case H_PARAMETER: + dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", + rc); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); + break; + + case H_DROPPED: + dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", + rc); + rc = ERROR; + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + break; + + case H_CLOSED: + pr_warn("init_msg: failed to send, rc %ld\n", rc); + rc = 0; + break; + } + break; + + case UNDEFINED: + rc = ERROR; + break; + + case UNCONFIGURING: + break; + + case PART_UP_WAIT_ENAB: + case CONNECTED: + case SRP_PROCESSING: + case WAIT_IDLE: + case NO_QUEUE: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case ERR_DISCONNECTED: + default: + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", + vscsi->state); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + return rc; +} + +/** + * ibmvscsis_init_msg() - Respond to an init message + * @vscsi: Pointer to our adapter structure + * @crq: Pointer to CRQ element containing the Init Message + * + * EXECUTION ENVIRONMENT: + * Interrupt, interrupt lock held + */ +static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) +{ + long rc = ADAPT_SUCCESS; + + pr_debug("init_msg: state 0x%hx\n", vscsi->state); + + rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, + (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, + 0); + if (rc == H_SUCCESS) { + vscsi->client_data.partition_number = + be64_to_cpu(*(u64 *)vscsi->map_buf); + pr_debug("init_msg, part num %d\n", + vscsi->client_data.partition_number); + } else { + pr_debug("init_msg h_vioctl rc %ld\n", rc); + rc = ADAPT_SUCCESS; + } + + if (crq->format == INIT_MSG) { + rc = ibmvscsis_handle_init_msg(vscsi); + } else if (crq->format == INIT_COMPLETE_MSG) { + rc = ibmvscsis_handle_init_compl_msg(vscsi); + } else { + rc = ERROR; + dev_err(&vscsi->dev, "init_msg: invalid format %d\n", + (uint)crq->format); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + } + + return rc; +} + +/** + * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue. + * @vscsi: Pointer to our adapter structure + * @crq: Pointer to CRQ element containing the SRP request + * + * This function will return success if the command queue element is valid + * and the srp iu or MAD request it pointed to was also valid. That does + * not mean that an error was not returned to the client. 
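+ *
+ * Sketch of the dispatch (illustrative): crq->valid selects the element
+ * class (command/response element, transport event, or init message); for
+ * command/response elements crq->format then selects MAD versus SRP
+ * handling, and in-CRQ messages such as PING are answered directly.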
+ * + * EXECUTION ENVIRONMENT: + * Interrupt, intr lock held + */ +static long ibmvscsis_parse_command(struct scsi_info *vscsi, + struct viosrp_crq *crq) +{ + long rc = ADAPT_SUCCESS; + + switch (crq->valid) { + case VALID_CMD_RESP_EL: + switch (crq->format) { + case OS400_FORMAT: + case AIX_FORMAT: + case LINUX_FORMAT: + case MAD_FORMAT: + if (vscsi->flags & PROCESSING_MAD) { + rc = ERROR; + dev_err(&vscsi->dev, "parse_command: already processing mad\n"); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + 0); + } else { + vscsi->flags |= PROCESSING_MAD; + rc = ibmvscsis_mad(vscsi, crq); + } + break; + + case SRP_FORMAT: + ibmvscsis_srp_cmd(vscsi, crq); + break; + + case MESSAGE_IN_CRQ: + if (crq->status == PING) + ibmvscsis_ping_response(vscsi); + break; + + default: + dev_err(&vscsi->dev, "parse_command: invalid format %d\n", + (uint)crq->format); + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + break; + } + break; + + case VALID_TRANS_EVENT: + rc = ibmvscsis_trans_event(vscsi, crq); + break; + + case VALID_INIT_MSG: + rc = ibmvscsis_init_msg(vscsi, crq); + break; + + default: + dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n", + (uint)crq->valid); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); + break; + } + + /* + * Return only what the interrupt handler cares + * about. Most errors we keep right on trucking. + */ + rc = vscsi->flags & SCHEDULE_DISCONNECT; + + return rc; +} + +static int read_dma_window(struct scsi_info *vscsi) +{ + struct vio_dev *vdev = vscsi->dma_dev; + const __be32 *dma_window; + const __be32 *prop; + + /* TODO Using of_parse_dma_window would be better, but it doesn't give + * a way to read multiple windows without already knowing the size of + * a window or the number of windows. + */ + dma_window = (const __be32 *)vio_get_attribute(vdev, + "ibm,my-dma-window", + NULL); + if (!dma_window) { + pr_err("Couldn't find ibm,my-dma-window property\n"); + return -1; + } + + vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window); + dma_window++; + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells", + NULL); + if (!prop) { + pr_warn("Couldn't find ibm,#dma-address-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells", + NULL); + if (!prop) { + pr_warn("Couldn't find ibm,#dma-size-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + /* dma_window should point to the second window now */ + vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window); + + return 0; +} + +static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name) +{ + struct ibmvscsis_tport *tport = NULL; + struct vio_dev *vdev; + struct scsi_info *vscsi; + + spin_lock_bh(&ibmvscsis_dev_lock); + list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) { + vdev = vscsi->dma_dev; + if (!strcmp(dev_name(&vdev->dev), name)) { + tport = &vscsi->tport; + break; + } + } + spin_unlock_bh(&ibmvscsis_dev_lock); + + return tport; +} + +/** + * ibmvscsis_parse_cmd() - Parse SRP Command + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to command element with SRP command + * + * Parse the srp command; if it is valid then submit it to tcm. + * Note: The return code does not reflect the status of the SCSI CDB. 
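+ *
+ * Illustrative example: with flat-space LUN addressing, LUN 5 arrives as
+ * scsi_lun[] = { 0x40, 0x05, 0, ... }; masking the address-method bits
+ * (scsi_lun[0] &= 0x3f) leaves { 0x00, 0x05 }, which scsilun_to_int()
+ * converts back to 5 before the command is submitted to TCM.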
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Process level
+ */
+static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
+				struct ibmvscsis_cmd *cmd)
+{
+	struct iu_entry *iue = cmd->iue;
+	struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
+	struct ibmvscsis_nexus *nexus;
+	u64 data_len = 0;
+	enum dma_data_direction dir;
+	int attr = 0;
+	int rc = 0;
+
+	nexus = vscsi->tport.ibmv_nexus;
+	/*
+	 * The additional length is in bytes. Note that the SRP spec says that
+	 * additional length is in 4-byte words, but technically the
+	 * additional length field is only the upper 6 bits of the byte.
+	 * The lower 2 bits are reserved. If the lower 2 bits are 0 (as
+	 * all reserved fields should be), then interpreting the byte as
+	 * an int will yield the length in bytes.
+	 */
+	if (srp->add_cdb_len & 0x03) {
+		dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
+		spin_lock_bh(&vscsi->intr_lock);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+		ibmvscsis_free_cmd_resources(vscsi, cmd);
+		spin_unlock_bh(&vscsi->intr_lock);
+		return;
+	}
+
+	if (srp_get_desc_table(srp, &dir, &data_len)) {
+		dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
+			srp->tag);
+		goto fail;
+	}
+
+	cmd->rsp.sol_not = srp->sol_not;
+
+	switch (srp->task_attr) {
+	case SRP_SIMPLE_TASK:
+		attr = TCM_SIMPLE_TAG;
+		break;
+	case SRP_ORDERED_TASK:
+		attr = TCM_ORDERED_TAG;
+		break;
+	case SRP_HEAD_TASK:
+		attr = TCM_HEAD_TAG;
+		break;
+	case SRP_ACA_TASK:
+		attr = TCM_ACA_TAG;
+		break;
+	default:
+		dev_err(&vscsi->dev, "Invalid task attribute %d\n",
+			srp->task_attr);
+		goto fail;
+	}
+
+	cmd->se_cmd.tag = be64_to_cpu(srp->tag);
+
+	spin_lock_bh(&vscsi->intr_lock);
+	list_add_tail(&cmd->list, &vscsi->active_q);
+	spin_unlock_bh(&vscsi->intr_lock);
+
+	srp->lun.scsi_lun[0] &= 0x3f;
+
+	pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n",
+		 &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0],
+		 attr);
+
+	rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
+			       cmd->sense_buf, scsilun_to_int(&srp->lun),
+			       data_len, attr, dir, 0);
+	if (rc) {
+		dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+		goto fail;
+	}
+	return;
+
+fail:
+	spin_lock_bh(&vscsi->intr_lock);
+	ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+	spin_unlock_bh(&vscsi->intr_lock);
+}
+
+/**
+ * ibmvscsis_parse_task() - Parse SRP Task Management Request
+ * @vscsi:	Pointer to our adapter structure
+ * @cmd:	Pointer to command element with SRP task management request
+ *
+ * Parse the srp task management request; if it is valid then submit it to tcm.
+ * Note: The return code does not reflect the status of the task management
+ * request.
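+ *
+ * Illustrative mapping: SRP_TSK_ABORT_TASK -> TMR_ABORT_TASK,
+ * SRP_TSK_LUN_RESET -> TMR_LUN_RESET, and so on; only ABORT_TASK also
+ * carries the tag of the task to be aborted.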
+ * + * EXECUTION ENVIRONMENT: + * Processor level + */ +static void ibmvscsis_parse_task(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt; + int tcm_type; + u64 tag_to_abort = 0; + int rc = 0; + struct ibmvscsis_nexus *nexus; + + nexus = vscsi->tport.ibmv_nexus; + + cmd->rsp.sol_not = srp_tsk->sol_not; + + switch (srp_tsk->tsk_mgmt_func) { + case SRP_TSK_ABORT_TASK: + tcm_type = TMR_ABORT_TASK; + tag_to_abort = be64_to_cpu(srp_tsk->task_tag); + break; + case SRP_TSK_ABORT_TASK_SET: + tcm_type = TMR_ABORT_TASK_SET; + break; + case SRP_TSK_CLEAR_TASK_SET: + tcm_type = TMR_CLEAR_TASK_SET; + break; + case SRP_TSK_LUN_RESET: + tcm_type = TMR_LUN_RESET; + break; + case SRP_TSK_CLEAR_ACA: + tcm_type = TMR_CLEAR_ACA; + break; + default: + dev_err(&vscsi->dev, "unknown task mgmt func %d\n", + srp_tsk->tsk_mgmt_func); + cmd->se_cmd.se_tmr_req->response = + TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; + rc = -1; + break; + } + + if (!rc) { + cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag); + + spin_lock_bh(&vscsi->intr_lock); + list_add_tail(&cmd->list, &vscsi->active_q); + spin_unlock_bh(&vscsi->intr_lock); + + srp_tsk->lun.scsi_lun[0] &= 0x3f; + + pr_debug("calling submit_tmr, func %d\n", + srp_tsk->tsk_mgmt_func); + rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL, + scsilun_to_int(&srp_tsk->lun), srp_tsk, + tcm_type, GFP_KERNEL, tag_to_abort, 0); + if (rc) { + dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", + rc); + cmd->se_cmd.se_tmr_req->response = + TMR_FUNCTION_REJECTED; + } + } + + if (rc) + transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0); +} + +static void ibmvscsis_scheduler(struct work_struct *work) +{ + struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd, + work); + struct scsi_info *vscsi = cmd->adapter; + + spin_lock_bh(&vscsi->intr_lock); + + /* Remove from schedule_q */ + list_del(&cmd->list); + + /* Don't submit cmd if we're disconnecting */ + if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) { + ibmvscsis_free_cmd_resources(vscsi, cmd); + + /* ibmvscsis_disconnect might be waiting for us */ + if (list_empty(&vscsi->active_q) && + list_empty(&vscsi->schedule_q) && + (vscsi->flags & WAIT_FOR_IDLE)) { + vscsi->flags &= ~WAIT_FOR_IDLE; + complete(&vscsi->wait_idle); + } + + spin_unlock_bh(&vscsi->intr_lock); + return; + } + + spin_unlock_bh(&vscsi->intr_lock); + + switch (cmd->type) { + case SCSI_CDB: + ibmvscsis_parse_cmd(vscsi, cmd); + break; + case TASK_MANAGEMENT: + ibmvscsis_parse_task(vscsi, cmd); + break; + default: + dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n", + cmd->type); + spin_lock_bh(&vscsi->intr_lock); + ibmvscsis_free_cmd_resources(vscsi, cmd); + spin_unlock_bh(&vscsi->intr_lock); + break; + } +} + +static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num) +{ + struct ibmvscsis_cmd *cmd; + int i; + + INIT_LIST_HEAD(&vscsi->free_cmd); + vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd), + GFP_KERNEL); + if (!vscsi->cmd_pool) + return -ENOMEM; + + for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num; + i++, cmd++) { + cmd->adapter = vscsi; + INIT_WORK(&cmd->work, ibmvscsis_scheduler); + list_add_tail(&cmd->list, &vscsi->free_cmd); + } + + return 0; +} + +static void ibmvscsis_free_cmds(struct scsi_info *vscsi) +{ + kfree(vscsi->cmd_pool); + vscsi->cmd_pool = NULL; + INIT_LIST_HEAD(&vscsi->free_cmd); +} + +/** + * ibmvscsis_service_wait_q() - Service Waiting Queue + * @timer: 
Pointer to timer which has expired
+ *
+ * This routine is called when the timer pops to service the waiting
+ * queue. Elements on the queue have completed, their responses have been
+ * copied to the client, but the client's response queue was full so
+ * the queue message could not be sent. The routine grabs the proper locks
+ * and calls ibmvscsis_send_messages().
+ *
+ * EXECUTION ENVIRONMENT:
+ *	called at interrupt level
+ */
+static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
+{
+	struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
+	struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
+					       rsp_q_timer);
+
+	spin_lock_bh(&vscsi->intr_lock);
+	p_timer->timer_pops += 1;
+	p_timer->started = false;
+	ibmvscsis_send_messages(vscsi);
+	spin_unlock_bh(&vscsi->intr_lock);
+
+	return HRTIMER_NORESTART;
+}
+
+static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
+{
+	struct timer_cb *p_timer;
+
+	p_timer = &vscsi->rsp_q_timer;
+	hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+	p_timer->timer.function = ibmvscsis_service_wait_q;
+	p_timer->started = false;
+	p_timer->timer_pops = 0;
+
+	return ADAPT_SUCCESS;
+}
+
+static void ibmvscsis_freetimer(struct scsi_info *vscsi)
+{
+	struct timer_cb *p_timer;
+
+	p_timer = &vscsi->rsp_q_timer;
+
+	(void)hrtimer_cancel(&p_timer->timer);
+
+	p_timer->started = false;
+	p_timer->timer_pops = 0;
+}
+
+static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
+{
+	struct scsi_info *vscsi = data;
+
+	vio_disable_interrupts(vscsi->dma_dev);
+	tasklet_schedule(&vscsi->work_task);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ibmvscsis_check_q() - Helper function to Check Init Message Valid
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Checks if an initialize message was queued by the initiator
+ * while the timing window was open. This function is called from
+ * probe after the CRQ is created and interrupts are enabled.
+ * It would only be used by adapters that wait for some event before
+ * completing the init handshake with the client. For ibmvscsis, this
+ * event is waiting for the port to be enabled.
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Process level only, interrupt lock held
+ */
+static long ibmvscsis_check_q(struct scsi_info *vscsi)
+{
+	uint format;
+	long rc;
+
+	rc = ibmvscsis_check_init_msg(vscsi, &format);
+	if (rc)
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+	else if (format == UNUSED_FORMAT)
+		vscsi->state = WAIT_ENABLED;
+	else
+		vscsi->state = PART_UP_WAIT_ENAB;
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_enable_change_state() - Set new state based on enabled status
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * This function determines our new state now that we are enabled. This
+ * may involve sending an Init Complete message to the client.
+ *
+ * Must be called with interrupt lock held.
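+ *
+ * A rough sketch of the two interesting transitions (illustrative):
+ *
+ *	WAIT_ENABLED      -> send INIT_MSG          -> WAIT_CONNECTION
+ *	PART_UP_WAIT_ENAB -> send INIT_COMPLETE_MSG -> CONNECTED
+ *
+ * where PART_UP_WAIT_ENAB means the client's init message arrived before
+ * the target port was enabled.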
+ */
+static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+
+handle_state_change:
+	switch (vscsi->state) {
+	case WAIT_ENABLED:
+		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+		switch (rc) {
+		case H_SUCCESS:
+		case H_DROPPED:
+		case H_CLOSED:
+			vscsi->state = WAIT_CONNECTION;
+			rc = ADAPT_SUCCESS;
+			break;
+
+		case H_PARAMETER:
+			break;
+
+		case H_HARDWARE:
+			break;
+
+		default:
+			vscsi->state = UNDEFINED;
+			rc = H_HARDWARE;
+			break;
+		}
+		break;
+	case PART_UP_WAIT_ENAB:
+		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+		switch (rc) {
+		case H_SUCCESS:
+			vscsi->state = CONNECTED;
+			rc = ADAPT_SUCCESS;
+			break;
+
+		case H_DROPPED:
+		case H_CLOSED:
+			vscsi->state = WAIT_ENABLED;
+			goto handle_state_change;
+
+		case H_PARAMETER:
+			break;
+
+		case H_HARDWARE:
+			break;
+
+		default:
+			rc = H_HARDWARE;
+			break;
+		}
+		break;
+
+	case WAIT_CONNECTION:
+	case WAIT_IDLE:
+	case SRP_PROCESSING:
+	case CONNECTED:
+		rc = ADAPT_SUCCESS;
+		break;
+	/* should not be able to get here */
+	case UNCONFIGURING:
+		rc = ERROR;
+		vscsi->state = UNDEFINED;
+		break;
+
+	/* driver should never allow this to happen */
+	case ERR_DISCONNECT:
+	case ERR_DISCONNECT_RECONNECT:
+	default:
+		dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
+			vscsi->state);
+		rc = ADAPT_SUCCESS;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_create_command_q() - Create Command Queue
+ * @vscsi:	Pointer to our adapter structure
+ * @num_cmds:	Currently unused. In the future, may be used to determine
+ *		the size of the CRQ.
+ *
+ * Allocates memory for the command queue, maps remote memory into an ioba,
+ * and initializes the command response queue.
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Process level only
+ */
+static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
+{
+	long rc = 0;
+	int pages;
+	struct vio_dev *vdev = vscsi->dma_dev;
+
+	/* We might support multiple pages in the future, but just 1 for now */
+	pages = 1;
+
+	vscsi->cmd_q.size = pages;
+
+	vscsi->cmd_q.base_addr =
+		(struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
+	if (!vscsi->cmd_q.base_addr)
+		return -ENOMEM;
+
+	vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
+
+	vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
+						vscsi->cmd_q.base_addr,
+						PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
+		free_page((unsigned long)vscsi->cmd_q.base_addr);
+		return -ENOMEM;
+	}
+
+	rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
+	if (rc) {
+		if (rc == H_CLOSED) {
+			vscsi->state = WAIT_ENABLED;
+			rc = 0;
+		} else {
+			dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
+					 PAGE_SIZE, DMA_BIDIRECTIONAL);
+			free_page((unsigned long)vscsi->cmd_q.base_addr);
+			rc = -ENODEV;
+		}
+	} else {
+		vscsi->state = WAIT_ENABLED;
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_destroy_command_q - Destroy Command Queue
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Releases memory for command queue and unmaps mapped remote memory.
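+ *
+ * Pairs with ibmvscsis_create_command_q(); callers (e.g. the probe error
+ * path and remove) first call ibmvscsis_unregister_command_q() so the CRQ
+ * is released from the hypervisor before its page is unmapped and freed.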
+ * + * EXECUTION ENVIRONMENT: + * Process level only + */ +static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi) +{ + dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token, + PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)vscsi->cmd_q.base_addr); + vscsi->cmd_q.base_addr = NULL; + vscsi->state = NO_QUEUE; +} + +static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd) +{ + struct iu_entry *iue = cmd->iue; + struct se_cmd *se_cmd = &cmd->se_cmd; + struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf; + struct scsi_sense_hdr sshdr; + u8 rc = se_cmd->scsi_status; + + if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb))) + if (scsi_normalize_sense(se_cmd->sense_buffer, + se_cmd->scsi_sense_length, &sshdr)) + if (sshdr.sense_key == HARDWARE_ERROR && + (se_cmd->residual_count == 0 || + se_cmd->residual_count == se_cmd->data_length)) { + rc = NO_SENSE; + cmd->flags |= CMD_FAST_FAIL; + } + + return rc; +} + +/** + * srp_build_response() - Build an SRP response buffer + * @vscsi: Pointer to our adapter structure + * @cmd: Pointer to command for which to send the response + * @len_p: Where to return the length of the IU response sent. This + * is needed to construct the CRQ response. + * + * Build the SRP response buffer and copy it to the client's memory space. + */ +static long srp_build_response(struct scsi_info *vscsi, + struct ibmvscsis_cmd *cmd, uint *len_p) +{ + struct iu_entry *iue = cmd->iue; + struct se_cmd *se_cmd = &cmd->se_cmd; + struct srp_rsp *rsp; + uint len; + u32 rsp_code; + char *data; + u32 *tsk_status; + long rc = ADAPT_SUCCESS; + + spin_lock_bh(&vscsi->intr_lock); + + rsp = &vio_iu(iue)->srp.rsp; + len = sizeof(*rsp); + memset(rsp, 0, len); + data = rsp->data; + + rsp->opcode = SRP_RSP; + + if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING) + rsp->req_lim_delta = cpu_to_be32(vscsi->credit); + else + rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); + rsp->tag = cmd->rsp.tag; + rsp->flags = 0; + + if (cmd->type == SCSI_CDB) { + rsp->status = ibmvscsis_fast_fail(vscsi, cmd); + if (rsp->status) { + pr_debug("build_resp: cmd %p, scsi status %d\n", cmd, + (int)rsp->status); + ibmvscsis_determine_resid(se_cmd, rsp); + if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) { + rsp->sense_data_len = + cpu_to_be32(se_cmd->scsi_sense_length); + rsp->flags |= SRP_RSP_FLAG_SNSVALID; + len += se_cmd->scsi_sense_length; + memcpy(data, se_cmd->sense_buffer, + se_cmd->scsi_sense_length); + } + rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> + UCSOLNT_RESP_SHIFT; + } else if (cmd->flags & CMD_FAST_FAIL) { + pr_debug("build_resp: cmd %p, fast fail\n", cmd); + rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> + UCSOLNT_RESP_SHIFT; + } else { + rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> + SCSOLNT_RESP_SHIFT; + } + } else { + /* this is task management */ + rsp->status = 0; + rsp->resp_data_len = cpu_to_be32(4); + rsp->flags |= SRP_RSP_FLAG_RSPVALID; + + switch (se_cmd->se_tmr_req->response) { + case TMR_FUNCTION_COMPLETE: + case TMR_TASK_DOES_NOT_EXIST: + rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE; + rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> + SCSOLNT_RESP_SHIFT; + break; + case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: + case TMR_LUN_DOES_NOT_EXIST: + rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED; + rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> + UCSOLNT_RESP_SHIFT; + break; + case TMR_FUNCTION_FAILED: + case TMR_FUNCTION_REJECTED: + default: + rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED; + 
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> + UCSOLNT_RESP_SHIFT; + break; + } + + tsk_status = (u32 *)data; + *tsk_status = cpu_to_be32(rsp_code); + data = (char *)(tsk_status + 1); + len += 4; + } + + dma_wmb(); + rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma, + vscsi->dds.window[REMOTE].liobn, + be64_to_cpu(iue->remote_token)); + + switch (rc) { + case H_SUCCESS: + vscsi->credit = 0; + *len_p = len; + break; + case H_PERMISSION: + if (connection_broken(vscsi)) + vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED; + + dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n", + rc, vscsi->flags, vscsi->state); + break; + case H_SOURCE_PARM: + case H_DEST_PARM: + default: + dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n", + rc); + break; + } + + spin_unlock_bh(&vscsi->intr_lock); + + return rc; +} + +static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg, + int nsg, struct srp_direct_buf *md, int nmd, + enum dma_data_direction dir, unsigned int bytes) +{ + struct iu_entry *iue = cmd->iue; + struct srp_target *target = iue->target; + struct scsi_info *vscsi = target->ldata; + struct scatterlist *sgp; + dma_addr_t client_ioba, server_ioba; + ulong buf_len; + ulong client_len, server_len; + int md_idx; + long tx_len; + long rc = 0; + + pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes); + + if (bytes == 0) + return 0; + + sgp = sg; + client_len = 0; + server_len = 0; + md_idx = 0; + tx_len = bytes; + + do { + if (client_len == 0) { + if (md_idx >= nmd) { + dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n"); + rc = -EIO; + break; + } + client_ioba = be64_to_cpu(md[md_idx].va); + client_len = be32_to_cpu(md[md_idx].len); + } + if (server_len == 0) { + if (!sgp) { + dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n"); + rc = -EIO; + break; + } + server_ioba = sg_dma_address(sgp); + server_len = sg_dma_len(sgp); + } + + buf_len = tx_len; + + if (buf_len > client_len) + buf_len = client_len; + + if (buf_len > server_len) + buf_len = server_len; + + if (buf_len > max_vdma_size) + buf_len = max_vdma_size; + + if (dir == DMA_TO_DEVICE) { + /* read from client */ + rc = h_copy_rdma(buf_len, + vscsi->dds.window[REMOTE].liobn, + client_ioba, + vscsi->dds.window[LOCAL].liobn, + server_ioba); + } else { + /* write to client */ + struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf; + + if (!READ_CMD(srp->cdb)) + print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE, + sg_virt(sgp), buf_len); + /* The h_copy_rdma will cause phyp, running in another + * partition, to read memory, so we need to make sure + * the data has been written out, hence these syncs. 
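			 * In other words, the isync/dma_wmb pair below must
			 * complete before the hcall is made, so the
			 * hypervisor copies current data.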
+		 */
+		/* ensure that everything is in memory */
+		isync();
+		/* ensure that memory has been made visible */
+		dma_wmb();
+		rc = h_copy_rdma(buf_len,
+				 vscsi->dds.window[LOCAL].liobn,
+				 server_ioba,
+				 vscsi->dds.window[REMOTE].liobn,
+				 client_ioba);
+	}
+	switch (rc) {
+	case H_SUCCESS:
+		break;
+	case H_PERMISSION:
+	case H_SOURCE_PARM:
+	case H_DEST_PARM:
+		if (connection_broken(vscsi)) {
+			spin_lock_bh(&vscsi->intr_lock);
+			vscsi->flags |=
+				(RESPONSE_Q_DOWN | CLIENT_FAILED);
+			spin_unlock_bh(&vscsi->intr_lock);
+		}
+		dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
+			rc);
+		break;
+
+	default:
+		dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
+			rc);
+		break;
+	}
+
+	if (!rc) {
+		tx_len -= buf_len;
+		if (tx_len) {
+			client_len -= buf_len;
+			if (client_len == 0)
+				md_idx++;
+			else
+				client_ioba += buf_len;
+
+			server_len -= buf_len;
+			if (server_len == 0)
+				sgp = sg_next(sgp);
+			else
+				server_ioba += buf_len;
+		} else {
+			break;
+		}
+	}
+	} while (!rc);
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_handle_crq() - Handle CRQ
+ * @data:	Pointer to our adapter structure
+ *
+ * Read the command elements from the command queue and copy the payloads
+ * associated with the command elements to local memory and execute the
+ * SRP requests.
+ *
+ * Note: this is an edge triggered interrupt. It cannot be shared.
+ */
+static void ibmvscsis_handle_crq(unsigned long data)
+{
+	struct scsi_info *vscsi = (struct scsi_info *)data;
+	struct viosrp_crq *crq;
+	long rc;
+	bool ack = true;
+	volatile u8 valid;
+
+	spin_lock_bh(&vscsi->intr_lock);
+
+	pr_debug("got interrupt\n");
+
+	/*
+	 * if we are in a path where we are waiting for all pending commands
+	 * to complete because we received a transport event and anything in
+	 * the command queue is for a new connection, do nothing
+	 */
+	if (TARGET_STOP(vscsi)) {
+		vio_enable_interrupts(vscsi->dma_dev);
+
+		pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
+			 vscsi->flags, vscsi->state);
+		spin_unlock_bh(&vscsi->intr_lock);
+		return;
+	}
+
+	rc = vscsi->flags & SCHEDULE_DISCONNECT;
+	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+	valid = crq->valid;
+	dma_rmb();
+
+	while (valid) {
+		/*
+		 * These are edge triggered interrupts. After dropping out of
+		 * the while loop, the code must check for work since an
+		 * interrupt could be lost, and an element be left on the
+		 * queue, hence the label.
+		 */
+cmd_work:
+		vscsi->cmd_q.index =
+			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
+
+		if (!rc) {
+			rc = ibmvscsis_parse_command(vscsi, crq);
+		} else {
+			if ((uint)crq->valid == VALID_TRANS_EVENT) {
+				/*
+				 * must service the transport layer events even
+				 * in an error state, don't break out until all
+				 * the consecutive transport events have been
+				 * processed
+				 */
+				rc = ibmvscsis_trans_event(vscsi, crq);
+			} else if (vscsi->flags & TRANS_EVENT) {
+				/*
+				 * if a transport event has occurred leave
+				 * everything but transport events on the queue
+				 */
+				pr_debug("handle_crq, ignoring\n");
+
+				/*
+				 * need to decrement the queue index so we can
+				 * look at the element again
+				 */
+				if (vscsi->cmd_q.index)
+					vscsi->cmd_q.index -= 1;
+				else
+					/*
+					 * index is at 0, it just wrapped;
+					 * have it index the last element in q
+					 */
+					vscsi->cmd_q.index = vscsi->cmd_q.mask;
+				break;
+			}
+		}
+
+		crq->valid = INVALIDATE_CMD_RESP_EL;
+
+		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+		valid = crq->valid;
+		dma_rmb();
+	}
+
+	if (!rc) {
+		if (ack) {
+			vio_enable_interrupts(vscsi->dma_dev);
+			ack = false;
+			pr_debug("handle_crq, reenabling interrupts\n");
+		}
+		valid = crq->valid;
+		dma_rmb();
+		if (valid)
+			goto cmd_work;
+	} else {
+		pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
+			 vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+	}
+
+	pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
+		 (int)list_empty(&vscsi->schedule_q), vscsi->flags,
+		 vscsi->state);
+
+	spin_unlock_bh(&vscsi->intr_lock);
+}
+
+static int ibmvscsis_probe(struct vio_dev *vdev,
+			   const struct vio_device_id *id)
+{
+	struct scsi_info *vscsi;
+	int rc = 0;
+	long hrc = 0;
+	char wq_name[24];
+
+	vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
+	if (!vscsi) {
+		rc = -ENOMEM;
+		pr_err("probe: allocation of adapter failed\n");
+		return rc;
+	}
+
+	vscsi->dma_dev = vdev;
+	vscsi->dev = vdev->dev;
+	INIT_LIST_HEAD(&vscsi->schedule_q);
+	INIT_LIST_HEAD(&vscsi->waiting_rsp);
+	INIT_LIST_HEAD(&vscsi->active_q);
+
+	snprintf(vscsi->tport.tport_name, sizeof(vscsi->tport.tport_name),
+		 "%s", dev_name(&vdev->dev));
+
+	pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+
+	rc = read_dma_window(vscsi);
+	if (rc)
+		goto free_adapter;
+	pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
+		 vscsi->dds.window[LOCAL].liobn,
+		 vscsi->dds.window[REMOTE].liobn);
+
+	strcpy(vscsi->eye, "VSCSI ");
+	strncat(vscsi->eye, vdev->name, MAX_EYE - sizeof("VSCSI "));
+
+	vscsi->dds.unit_id = vdev->unit_address;
+
+	spin_lock_bh(&ibmvscsis_dev_lock);
+	list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
+	spin_unlock_bh(&ibmvscsis_dev_lock);
+
+	/*
+	 * TBD: How do we determine # of cmds to request? Do we know how
+	 * many "children" we have?
+	 */
+	vscsi->request_limit = INITIAL_SRP_LIMIT;
+	rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
+			      SRP_MAX_IU_LEN);
+	if (rc)
+		goto rem_list;
+
+	vscsi->target.ldata = vscsi;
+
+	rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
+	if (rc) {
+		dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
+			rc, vscsi->request_limit);
+		goto free_target;
+	}
+
+	/*
+	 * Note: the lock is used in freeing timers, so must initialize
+	 * first so that ordering in case of error is correct.
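+	 * (The timer callback, ibmvscsis_service_wait_q(), takes intr_lock,
+	 * so the lock must be valid before the timer can ever fire.)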
+	 */
+	spin_lock_init(&vscsi->intr_lock);
+
+	rc = ibmvscsis_alloctimer(vscsi);
+	if (rc) {
+		dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
+		goto free_cmds;
+	}
+
+	rc = ibmvscsis_create_command_q(vscsi, 256);
+	if (rc) {
+		dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
+			rc);
+		goto free_timer;
+	}
+
+	vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!vscsi->map_buf) {
+		rc = -ENOMEM;
+		dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
+		goto destroy_queue;
+	}
+
+	vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
+					 DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
+		dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
+		goto free_buf;
+	}
+
+	hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+		       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+		       0);
+	if (hrc == H_SUCCESS)
+		vscsi->client_data.partition_number =
+			be64_to_cpu(*(u64 *)vscsi->map_buf);
+	/*
+	 * We expect the VIOCTL to fail if we're configured as "any
+	 * client can connect" and the client isn't activated yet.
+	 * We'll make the call again when it sends an init msg.
+	 */
+	pr_debug("probe hrc %ld, client partition num %d\n",
+		 hrc, vscsi->client_data.partition_number);
+
+	tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
+		     (unsigned long)vscsi);
+
+	init_completion(&vscsi->wait_idle);
+
+	snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
+	vscsi->work_q = create_workqueue(wq_name);
+	if (!vscsi->work_q) {
+		rc = -ENOMEM;
+		dev_err(&vscsi->dev, "create_workqueue failed\n");
+		goto unmap_buf;
+	}
+
+	rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
+	if (rc) {
+		rc = -EPERM;
+		dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
+		goto destroy_WQ;
+	}
+
+	spin_lock_bh(&vscsi->intr_lock);
+	rc = vio_enable_interrupts(vdev);
+	if (rc) {
+		dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
+		rc = -ENODEV;
+		spin_unlock_bh(&vscsi->intr_lock);
+		goto free_irq;
+	}
+
+	if (ibmvscsis_check_q(vscsi)) {
+		rc = ERROR;
+		dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
+		spin_unlock_bh(&vscsi->intr_lock);
+		goto disable_interrupt;
+	}
+	spin_unlock_bh(&vscsi->intr_lock);
+
+	dev_set_drvdata(&vdev->dev, vscsi);
+
+	return 0;
+
+disable_interrupt:
+	vio_disable_interrupts(vdev);
+free_irq:
+	free_irq(vdev->irq, vscsi);
+destroy_WQ:
+	destroy_workqueue(vscsi->work_q);
+unmap_buf:
+	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
+			 DMA_BIDIRECTIONAL);
+free_buf:
+	kfree(vscsi->map_buf);
+destroy_queue:
+	tasklet_kill(&vscsi->work_task);
+	ibmvscsis_unregister_command_q(vscsi);
+	ibmvscsis_destroy_command_q(vscsi);
+free_timer:
+	ibmvscsis_freetimer(vscsi);
+free_cmds:
+	ibmvscsis_free_cmds(vscsi);
+free_target:
+	srp_target_free(&vscsi->target);
+rem_list:
+	spin_lock_bh(&ibmvscsis_dev_lock);
+	list_del(&vscsi->list);
+	spin_unlock_bh(&ibmvscsis_dev_lock);
+free_adapter:
+	kfree(vscsi);
+
+	return rc;
+}
+
+static int ibmvscsis_remove(struct vio_dev *vdev)
+{
+	struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
+
+	pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+
+	/*
+	 * TBD: Need to handle if there are commands on the waiting_rsp q
+	 * Actually, can there still be cmds outstanding to tcm?
+ */ + + vio_disable_interrupts(vdev); + free_irq(vdev->irq, vscsi); + destroy_workqueue(vscsi->work_q); + dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, + DMA_BIDIRECTIONAL); + kfree(vscsi->map_buf); + tasklet_kill(&vscsi->work_task); + ibmvscsis_unregister_command_q(vscsi); + ibmvscsis_destroy_command_q(vscsi); + ibmvscsis_freetimer(vscsi); + ibmvscsis_free_cmds(vscsi); + srp_target_free(&vscsi->target); + spin_lock_bh(&ibmvscsis_dev_lock); + list_del(&vscsi->list); + spin_unlock_bh(&ibmvscsis_dev_lock); + kfree(vscsi); + + return 0; +} + +static ssize_t system_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", system_id); +} + +static ssize_t partition_number_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%x\n", partition_number); +} + +static ssize_t unit_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev); + + return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address); +} + +static int ibmvscsis_get_system_info(void) +{ + struct device_node *rootdn, *vdevdn; + const char *id, *model, *name; + const uint *num; + + rootdn = of_find_node_by_path("/"); + if (!rootdn) + return -ENOENT; + + model = of_get_property(rootdn, "model", NULL); + id = of_get_property(rootdn, "system-id", NULL); + if (model && id) + snprintf(system_id, sizeof(system_id), "%s-%s", model, id); + + name = of_get_property(rootdn, "ibm,partition-name", NULL); + if (name) + strncpy(partition_name, name, sizeof(partition_name)); + + num = of_get_property(rootdn, "ibm,partition-no", NULL); + if (num) + partition_number = *num; + + of_node_put(rootdn); + + vdevdn = of_find_node_by_path("/vdevice"); + if (vdevdn) { + const uint *mvds; + + mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size", + NULL); + if (mvds) + max_vdma_size = *mvds; + of_node_put(vdevdn); + } + + return 0; +} + +static char *ibmvscsis_get_fabric_name(void) +{ + return "ibmvscsis"; +} + +static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct ibmvscsis_tport *tport = + container_of(se_tpg, struct ibmvscsis_tport, se_tpg); + + return tport->tport_name; +} + +static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg) +{ + struct ibmvscsis_tport *tport = + container_of(se_tpg, struct ibmvscsis_tport, se_tpg); + + return tport->tport_tpgt; +} + +static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int ibmvscsis_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int ibmvscsis_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd) +{ + return target_put_sess_cmd(se_cmd); +} + +static void ibmvscsis_release_cmd(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct scsi_info *vscsi = cmd->adapter; + + pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags); + + spin_lock_bh(&vscsi->intr_lock); + /* Remove from active_q */ + list_del(&cmd->list); + list_add_tail(&cmd->list, &vscsi->waiting_rsp); + ibmvscsis_send_messages(vscsi); + spin_unlock_bh(&vscsi->intr_lock); +} + +static u32 ibmvscsis_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static int ibmvscsis_write_pending(struct 
se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct iu_entry *iue = cmd->iue; + int rc; + + pr_debug("write_pending, se_cmd %p, length 0x%x\n", + se_cmd, se_cmd->data_length); + + rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, + 1, 1); + if (rc) { + pr_err("srp_transfer_data() failed: %d\n", rc); + return -EAGAIN; + } + /* + * We now tell TCM to add this WRITE CDB directly into the TCM storage + * object execution queue. + */ + target_execute_cmd(se_cmd); + return 0; +} + +static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl) +{ +} + +static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct iu_entry *iue = cmd->iue; + struct scsi_info *vscsi = cmd->adapter; + char *sd; + uint len = 0; + int rc; + + pr_debug("queue_data_in, se_cmd %p, length 0x%x\n", + se_cmd, se_cmd->data_length); + + rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1, + 1); + if (rc) { + pr_err("srp_transfer_data failed: %d\n", rc); + sd = se_cmd->sense_buffer; + se_cmd->scsi_sense_length = 18; + memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length); + /* Logical Unit Communication Time-out asc/ascq = 0x0801 */ + scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR, + 0x08, 0x01); + } + + srp_build_response(vscsi, cmd, &len); + cmd->rsp.format = SRP_FORMAT; + cmd->rsp.len = len; + + return 0; +} + +static int ibmvscsis_queue_status(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct scsi_info *vscsi = cmd->adapter; + uint len; + + pr_debug("queue_status %p\n", se_cmd); + + srp_build_response(vscsi, cmd, &len); + cmd->rsp.format = SRP_FORMAT; + cmd->rsp.len = len; + + return 0; +} + +static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd) +{ + struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, + se_cmd); + struct scsi_info *vscsi = cmd->adapter; + uint len; + + pr_debug("queue_tm_rsp %p, status %d\n", + se_cmd, (int)se_cmd->se_tmr_req->response); + + srp_build_response(vscsi, cmd, &len); + cmd->rsp.format = SRP_FORMAT; + cmd->rsp.len = len; +} + +static void ibmvscsis_aborted_task(struct se_cmd *se_cmd) +{ + /* TBD: What (if anything) should we do here? 
*/ + pr_debug("ibmvscsis_aborted_task %p\n", se_cmd); +} + +static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct ibmvscsis_tport *tport; + + tport = ibmvscsis_lookup_port(name); + if (tport) { + tport->tport_proto_id = SCSI_PROTOCOL_SRP; + pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n", + name, tport, tport->tport_proto_id); + return &tport->tport_wwn; + } + + return ERR_PTR(-EINVAL); +} + +static void ibmvscsis_drop_tport(struct se_wwn *wwn) +{ + struct ibmvscsis_tport *tport = container_of(wwn, + struct ibmvscsis_tport, + tport_wwn); + + pr_debug("drop_tport(%s)\n", + config_item_name(&tport->tport_wwn.wwn_group.cg_item)); +} + +static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct ibmvscsis_tport *tport = + container_of(wwn, struct ibmvscsis_tport, tport_wwn); + int rc; + + tport->releasing = false; + + rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg, + tport->tport_proto_id); + if (rc) + return ERR_PTR(rc); + + return &tport->se_tpg; +} + +static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg) +{ + struct ibmvscsis_tport *tport = container_of(se_tpg, + struct ibmvscsis_tport, + se_tpg); + + tport->releasing = true; + tport->enabled = false; + + /* + * Release the virtual I_T Nexus for this ibmvscsis TPG + */ + ibmvscsis_drop_nexus(tport); + /* + * Deregister the se_tpg from TCM.. + */ + core_tpg_deregister(se_tpg); +} + +static ssize_t ibmvscsis_wwn_version_show(struct config_item *item, + char *page) +{ + return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION); +} +CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version); + +static struct configfs_attribute *ibmvscsis_wwn_attrs[] = { + &ibmvscsis_wwn_attr_version, + NULL, +}; + +static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item, + char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); + struct ibmvscsis_tport *tport = container_of(se_tpg, + struct ibmvscsis_tport, + se_tpg); + + return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 
1 : 0);
+}
+
+static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
+					  const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct ibmvscsis_tport *tport = container_of(se_tpg,
+						     struct ibmvscsis_tport,
+						     se_tpg);
+	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
+	unsigned long tmp;
+	int rc;
+	long lrc;
+
+	rc = kstrtoul(page, 0, &tmp);
+	if (rc < 0) {
+		pr_err("Unable to extract ibmvscsis_tpg_store_enable\n");
+		return -EINVAL;
+	}
+
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for ibmvscsis_tpg_store_enable\n");
+		return -EINVAL;
+	}
+
+	if (tmp) {
+		tport->enabled = true;
+		spin_lock_bh(&vscsi->intr_lock);
+		lrc = ibmvscsis_enable_change_state(vscsi);
+		if (lrc)
+			pr_err("enable_change_state failed, rc %ld state %d\n",
+			       lrc, vscsi->state);
+		spin_unlock_bh(&vscsi->intr_lock);
+	} else {
+		tport->enabled = false;
+	}
+
+	pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+
+	return count;
+}
+CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
+
+static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
+	&ibmvscsis_tpg_attr_enable,
+	NULL,
+};
+
+static const struct target_core_fabric_ops ibmvscsis_ops = {
+	.module			= THIS_MODULE,
+	.name			= "ibmvscsis",
+	.get_fabric_name	= ibmvscsis_get_fabric_name,
+	.tpg_get_wwn		= ibmvscsis_get_fabric_wwn,
+	.tpg_get_tag		= ibmvscsis_get_tag,
+	.tpg_get_default_depth	= ibmvscsis_get_default_depth,
+	.tpg_check_demo_mode	= ibmvscsis_check_true,
+	.tpg_check_demo_mode_cache = ibmvscsis_check_true,
+	.tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
+	.tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
+	.tpg_get_inst_index	= ibmvscsis_tpg_get_inst_index,
+	.check_stop_free	= ibmvscsis_check_stop_free,
+	.release_cmd		= ibmvscsis_release_cmd,
+	.sess_get_index		= ibmvscsis_sess_get_index,
+	.write_pending		= ibmvscsis_write_pending,
+	.write_pending_status	= ibmvscsis_write_pending_status,
+	.set_default_node_attributes = ibmvscsis_set_default_node_attrs,
+	.get_cmd_state		= ibmvscsis_get_cmd_state,
+	.queue_data_in		= ibmvscsis_queue_data_in,
+	.queue_status		= ibmvscsis_queue_status,
+	.queue_tm_rsp		= ibmvscsis_queue_tm_rsp,
+	.aborted_task		= ibmvscsis_aborted_task,
+	/*
+	 * Setup function pointers for logic in target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn	= ibmvscsis_make_tport,
+	.fabric_drop_wwn	= ibmvscsis_drop_tport,
+	.fabric_make_tpg	= ibmvscsis_make_tpg,
+	.fabric_drop_tpg	= ibmvscsis_drop_tpg,
+
+	.tfc_wwn_attrs		= ibmvscsis_wwn_attrs,
+	.tfc_tpg_base_attrs	= ibmvscsis_tpg_attrs,
+};
+
+static void ibmvscsis_dev_release(struct device *dev) {}
+
+static struct class_attribute ibmvscsis_class_attrs[] = {
+	__ATTR_NULL,
+};
+
+static struct device_attribute dev_attr_system_id =
+	__ATTR(system_id, S_IRUGO, system_id_show, NULL);
+
+static struct device_attribute dev_attr_partition_number =
+	__ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
+
+static struct device_attribute dev_attr_unit_address =
+	__ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
+
+static struct attribute *ibmvscsis_dev_attrs[] = {
+	&dev_attr_system_id.attr,
+	&dev_attr_partition_number.attr,
+	&dev_attr_unit_address.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ibmvscsis_dev);
+
+static struct class ibmvscsis_class = {
+	.name		= "ibmvscsis",
+	.dev_release	= ibmvscsis_dev_release,
+	.class_attrs	= ibmvscsis_class_attrs,
+	.dev_groups	= ibmvscsis_dev_groups,
+};
+
+static struct vio_device_id ibmvscsis_device_table[] = {
+	{ "v-scsi-host", "IBM,v-scsi-host" },
+	{ "", "" }
+};
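+/*
+ * Example: the VIO bus matches this table against device-tree nodes, so a
+ * node with compatible "IBM,v-scsi-host" binds to this driver and
+ * ibmvscsis_probe() runs once per adapter instance.
+ */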
+MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table); + +static struct vio_driver ibmvscsis_driver = { + .name = "ibmvscsis", + .id_table = ibmvscsis_device_table, + .probe = ibmvscsis_probe, + .remove = ibmvscsis_remove, +}; + +/* + * ibmvscsis_init() - Kernel Module initialization + * + * Note: vio_register_driver() registers callback functions, and at least one + * of those callback functions calls TCM - Linux IO Target Subsystem, thus + * the SCSI Target template must be registered before vio_register_driver() + * is called. + */ +static int __init ibmvscsis_init(void) +{ + int rc = 0; + + rc = ibmvscsis_get_system_info(); + if (rc) { + pr_err("rc %d from get_system_info\n", rc); + goto out; + } + + rc = class_register(&ibmvscsis_class); + if (rc) { + pr_err("failed class register\n"); + goto out; + } + + rc = target_register_template(&ibmvscsis_ops); + if (rc) { + pr_err("rc %d from target_register_template\n", rc); + goto unregister_class; + } + + rc = vio_register_driver(&ibmvscsis_driver); + if (rc) { + pr_err("rc %d from vio_register_driver\n", rc); + goto unregister_target; + } + + return 0; + +unregister_target: + target_unregister_template(&ibmvscsis_ops); +unregister_class: + class_unregister(&ibmvscsis_class); +out: + return rc; +} + +static void __exit ibmvscsis_exit(void) +{ + pr_info("Unregister IBM virtual SCSI host driver\n"); + vio_unregister_driver(&ibmvscsis_driver); + target_unregister_template(&ibmvscsis_ops); + class_unregister(&ibmvscsis_class); +} + +MODULE_DESCRIPTION("IBMVSCSIS fabric driver"); +MODULE_AUTHOR("Bryant G. Ly and Michael Cyr"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IBMVSCSIS_VERSION); +module_init(ibmvscsis_init); +module_exit(ibmvscsis_exit); diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h new file mode 100644 index 000000000..981a0c992 --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h @@ -0,0 +1,346 @@ +/******************************************************************************* + * IBM Virtual SCSI Target Driver + * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp. + * Santiago Leon (santil@us.ibm.com) IBM Corp. + * Linda Xie (lxie@us.ibm.com) IBM Corp. + * + * Copyright (C) 2005-2011 FUJITA Tomonori + * Copyright (C) 2010 Nicholas A. Bellinger + * Copyright (C) 2016 Bryant G. Ly IBM Corp. + * + * Authors: Bryant G. Ly + * Authors: Michael Cyr + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + ****************************************************************************/ + +#ifndef __H_IBMVSCSI_TGT +#define __H_IBMVSCSI_TGT + +#include "libsrp.h" + +#define SYS_ID_NAME_LEN 64 +#define PARTITION_NAMELEN 96 +#define IBMVSCSIS_NAMELEN 32 + +#define MSG_HI 0 +#define MSG_LOW 1 + +#define MAX_CMD_Q_PAGES 4 +#define CRQ_PER_PAGE (PAGE_SIZE / sizeof(struct viosrp_crq)) +/* in terms of number of elements */ +#define DEFAULT_CMD_Q_SIZE CRQ_PER_PAGE +#define MAX_CMD_Q_SIZE (DEFAULT_CMD_Q_SIZE * MAX_CMD_Q_PAGES) + +#define SRP_VIOLATION 0x102 /* general error code */ + +/* + * SRP buffer formats defined as of 16.a supported by this driver. + */ +#define SUPPORTED_FORMATS ((SRP_DATA_DESC_DIRECT << 1) | \ + (SRP_DATA_DESC_INDIRECT << 1)) + +#define SCSI_LUN_ADDR_METHOD_FLAT 1 + +struct dma_window { + u32 liobn; /* Unique per vdevice */ + u64 tce_base; /* Physical location of the TCE table */ + u64 tce_size; /* Size of the TCE table in bytes */ +}; + +struct target_dds { + u64 unit_id; /* 64 bit will force alignment */ +#define NUM_DMA_WINDOWS 2 +#define LOCAL 0 +#define REMOTE 1 + struct dma_window window[NUM_DMA_WINDOWS]; + + /* root node property "ibm,partition-no" */ + uint partition_num; + char partition_name[PARTITION_NAMELEN]; +}; + +#define MAX_NUM_PORTS 1 +#define MAX_H_COPY_RDMA (128 * 1024) + +#define MAX_EYE 64 + +/* Return codes */ +#define ADAPT_SUCCESS 0L +/* choose error codes that do not conflict with PHYP */ +#define ERROR -40L + +struct format_code { + u8 reserved; + u8 buffers; +}; + +struct client_info { +#define SRP_VERSION "16.a" + char srp_version[8]; + /* root node property ibm,partition-name */ + char partition_name[PARTITION_NAMELEN]; + /* root node property ibm,partition-no */ + u32 partition_number; + /* initially 1 */ + u32 mad_version; + u32 os_type; +}; + +/* + * Changing this constant changes the number of seconds to wait before + * considering the client will never service its queue again. + */ +#define SECONDS_TO_CONSIDER_FAILED 30 +/* + * These constants set the polling period used to determine if the client + * has freed at least one element in the response queue. + */ +#define WAIT_SECONDS 1 +#define WAIT_NANO_SECONDS 5000 +#define MAX_TIMER_POPS ((1000000 / WAIT_NANO_SECONDS) * \ + SECONDS_TO_CONSIDER_FAILED) +/* + * general purpose timer control block + * which can be used for multiple functions + */ +struct timer_cb { + struct hrtimer timer; + /* + * how long has it been since the client + * serviced the queue. 
The variable is incremented
+	 * in the service_wait_q routine and cleared
+	 * in send messages
+	 */
+	int timer_pops;
+	/* the timer is started */
+	bool started;
+};
+
+struct cmd_queue {
+	/* kva */
+	struct viosrp_crq *base_addr;
+	dma_addr_t crq_token;
+	/* used to maintain index */
+	uint mask;
+	/* current element */
+	uint index;
+	int size;
+};
+
+#define SCSOLNT_RESP_SHIFT	1
+#define UCSOLNT_RESP_SHIFT	2
+
+#define SCSOLNT		BIT(SCSOLNT_RESP_SHIFT)
+#define UCSOLNT		BIT(UCSOLNT_RESP_SHIFT)
+
+enum cmd_type {
+	SCSI_CDB	= 0x01,
+	TASK_MANAGEMENT	= 0x02,
+	/* MAD or addressed to port 0 */
+	ADAPTER_MAD	= 0x04,
+	UNSET_TYPE	= 0x08,
+};
+
+struct iu_rsp {
+	u8 format;
+	u8 sol_not;
+	u16 len;
+	/* tag is just to help client identify cmd, so don't translate be/le */
+	u64 tag;
+};
+
+struct ibmvscsis_cmd {
+	struct list_head list;
+	/* Used for TCM Core operations */
+	struct se_cmd se_cmd;
+	struct iu_entry *iue;
+	struct iu_rsp rsp;
+	struct work_struct work;
+	struct scsi_info *adapter;
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
+	u64 init_time;
+#define CMD_FAST_FAIL	BIT(0)
+	u32 flags;
+	char type;
+};
+
+struct ibmvscsis_nexus {
+	struct se_session *se_sess;
+};
+
+struct ibmvscsis_tport {
+	/* SCSI protocol the tport is providing */
+	u8 tport_proto_id;
+	/* ASCII formatted WWPN for SRP Target port */
+	char tport_name[IBMVSCSIS_NAMELEN];
+	/* Returned by ibmvscsis_make_tport() */
+	struct se_wwn tport_wwn;
+	/* Returned by ibmvscsis_make_tpg() */
+	struct se_portal_group se_tpg;
+	/* ibmvscsis port target portal group tag for TCM */
+	u16 tport_tpgt;
+	/* Pointer to TCM session for I_T Nexus */
+	struct ibmvscsis_nexus *ibmv_nexus;
+	bool enabled;
+	bool releasing;
+};
+
+struct scsi_info {
+	struct list_head list;
+	char eye[MAX_EYE];
+
+	/* commands waiting for space on response queue */
+	struct list_head waiting_rsp;
+#define NO_QUEUE		0x00
+#define WAIT_ENABLED		0x01
+	/* driver has received an initialize command */
+#define PART_UP_WAIT_ENAB	0x02
+#define WAIT_CONNECTION		0x04
+	/* have established a connection */
+#define CONNECTED		0x08
+	/* at least one port is processing SRP IU */
+#define SRP_PROCESSING		0x10
+	/* remove request received */
+#define UNCONFIGURING		0x20
+	/* disconnect by letting adapter go idle, no error */
+#define WAIT_IDLE		0x40
+	/* disconnecting to clear an error */
+#define ERR_DISCONNECT		0x80
+	/* disconnect to clear error state, then come back up */
+#define ERR_DISCONNECT_RECONNECT	0x100
+	/* disconnected after clearing an error */
+#define ERR_DISCONNECTED	0x200
+	/* A series of errors caused unexpected errors */
+#define UNDEFINED		0x400
+	u16 state;
+	int fast_fail;
+	struct target_dds dds;
+	char *cmd_pool;
+	/* list of free commands */
+	struct list_head free_cmd;
+	/* command elements ready for scheduler */
+	struct list_head schedule_q;
+	/* commands sent to TCM */
+	struct list_head active_q;
+	caddr_t *map_buf;
+	/* ioba of map buffer */
+	dma_addr_t map_ioba;
+	/* allowable number of outstanding SRP requests */
+	int request_limit;
+	/* extra credit */
+	int credit;
+	/* outstanding transactions against credit limit */
+	int debit;
+
+	/* allow only one outstanding mad request */
+#define PROCESSING_MAD		0x00002
+	/* Waiting to go idle */
+#define WAIT_FOR_IDLE		0x00004
+	/* H_REG_CRQ called */
+#define CRQ_CLOSED		0x00010
+	/* detected that client has failed */
+#define CLIENT_FAILED		0x00040
+	/* detected that transport event occurred */
+#define TRANS_EVENT		0x00080
+	/* don't
attempt to send anything to the client */
+#define RESPONSE_Q_DOWN		0x00100
+	/* request made to schedule disconnect handler */
+#define SCHEDULE_DISCONNECT	0x00400
+	/* disconnect handler is scheduled */
+#define DISCONNECT_SCHEDULED	0x00800
+	u32 flags;
+	/* adapter lock */
+	spinlock_t intr_lock;
+	/* information needed to manage command queue */
+	struct cmd_queue cmd_q;
+	/* used in hcall to copy response back into srp buffer */
+	u64 empty_iu_id;
+	/* used in crq, to tag what iu the response is for */
+	u64 empty_iu_tag;
+	uint new_state;
+	/* control block for the response queue timer */
+	struct timer_cb rsp_q_timer;
+	/* keep last client to enable proper accounting */
+	struct client_info client_data;
+	/* what can this client do */
+	u32 client_cap;
+	/*
+	 * The following two fields capture state and flag changes that
+	 * can occur when the lock is given up. In the original design,
+	 * the lock was held during calls into phyp;
+	 * however, phyp did not meet PAPR architecture. This is
+	 * a work around.
+	 */
+	u16 phyp_acr_state;
+	u32 phyp_acr_flags;
+
+	struct workqueue_struct *work_q;
+	struct completion wait_idle;
+	struct device dev;
+	struct vio_dev *dma_dev;
+	struct srp_target target;
+	struct ibmvscsis_tport tport;
+	struct tasklet_struct work_task;
+	struct work_struct proc_work;
+};
+
+/*
+ * Provide a constant that allows software to detect the adapter is
+ * disconnecting from the client from one of several states.
+ */
+#define IS_DISCONNECTING	(UNCONFIGURING | ERR_DISCONNECT_RECONNECT | \
+				 ERR_DISCONNECT)
+
+/*
+ * Provide a constant that can be used with interrupt handling that
+ * essentially lets the interrupt handler know that all requests should
+ * be thrown out.
+ */
+#define DONT_PROCESS_STATE	(IS_DISCONNECTING | UNDEFINED | \
+				 ERR_DISCONNECTED | WAIT_IDLE)
+
+/*
+ * If any of these flag bits are set then do not allow the interrupt
+ * handler to schedule the off level handler.
+ */
+#define BLOCK			(DISCONNECT_SCHEDULED)
+
+/* State and transition events that stop the interrupt handler */
+#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \
+				  ((VSCSI)->flags & BLOCK))
+
+/* flag bits that are not reset during disconnect */
+#define PRESERVE_FLAG_FIELDS 0
+
+#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf))
+
+#define READ_CMD(cdb)	(((cdb)[0] & 0x1F) == 8)
+#define WRITE_CMD(cdb)	(((cdb)[0] & 0x1F) == 0xA)
+
+#ifndef H_GET_PARTNER_INFO
+#define H_GET_PARTNER_INFO	0x0000000000000008LL
+#endif
+
+#define h_copy_rdma(l, sa, sb, da, db) \
+		plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
+#define h_vioctl(u, o, a, u1, u2, u3, u4) \
+		plpar_hcall_norets(H_VIOCTL, u, o, a, u1, u2)
+#define h_reg_crq(ua, tok, sz) \
+		plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
+#define h_free_crq(ua) \
+		plpar_hcall_norets(H_FREE_CRQ, ua)
+#define h_send_crq(ua, d1, d2) \
+		plpar_hcall_norets(H_SEND_CRQ, ua, d1, d2)
+
+#endif
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c
new file mode 100644
index 000000000..5a4cc28ca
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c
@@ -0,0 +1,427 @@
+/*******************************************************************************
+ * SCSI RDMA Protocol lib functions
+ *
+ * Copyright (C) 2006 FUJITA Tomonori
+ * Copyright (C) 2016 Bryant G. Ly IBM Corp.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + ***********************************************************************/ + +#define pr_fmt(fmt) "libsrp: " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/kfifo.h> +#include <linux/spinlock.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <scsi/srp.h> +#include <target/target_core_base.h> +#include "libsrp.h" +#include "ibmvscsi_tgt.h" + +static int srp_iu_pool_alloc(struct srp_queue *q, size_t max, + struct srp_buf **ring) +{ + struct iu_entry *iue; + int i; + + q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL); + if (!q->pool) + return -ENOMEM; + q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL); + if (!q->items) + goto free_pool; + + spin_lock_init(&q->lock); + kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *)); + + for (i = 0, iue = q->items; i < max; i++) { + kfifo_in(&q->queue, (void *)&iue, sizeof(void *)); + iue->sbuf = ring[i]; + iue++; + } + return 0; + +free_pool: + kfree(q->pool); + return -ENOMEM; +} + +static void srp_iu_pool_free(struct srp_queue *q) +{ + kfree(q->items); + kfree(q->pool); +} + +static struct srp_buf **srp_ring_alloc(struct device *dev, + size_t max, size_t size) +{ + struct srp_buf **ring; + int i; + + ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL); + if (!ring) + return NULL; + + for (i = 0; i < max; i++) { + ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL); + if (!ring[i]) + goto out; + ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma, + GFP_KERNEL); + if (!ring[i]->buf) + goto out; + } + return ring; + +out: + for (i = 0; i < max && ring[i]; i++) { + if (ring[i]->buf) { + dma_free_coherent(dev, size, ring[i]->buf, + ring[i]->dma); + } + kfree(ring[i]); + } + kfree(ring); + + return NULL; +} + +static void srp_ring_free(struct device *dev, struct srp_buf **ring, + size_t max, size_t size) +{ + int i; + + for (i = 0; i < max; i++) { + dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); + kfree(ring[i]); + } + kfree(ring); +} + +int srp_target_alloc(struct srp_target *target, struct device *dev, + size_t nr, size_t iu_size) +{ + int err; + + spin_lock_init(&target->lock); + + target->dev = dev; + + target->srp_iu_size = iu_size; + target->rx_ring_size = nr; + target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size); + if (!target->rx_ring) + return -ENOMEM; + err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring); + if (err) + goto free_ring; + + dev_set_drvdata(target->dev, target); + return 0; + +free_ring: + srp_ring_free(target->dev, target->rx_ring, nr, iu_size); + return -ENOMEM; +} + +void srp_target_free(struct srp_target *target) +{ + dev_set_drvdata(target->dev, NULL); + srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size, + target->srp_iu_size); + srp_iu_pool_free(&target->iu_queue); +} + +struct iu_entry *srp_iu_get(struct srp_target *target) +{ + struct iu_entry *iue = NULL; + + if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue, + sizeof(void *), + &target->iu_queue.lock) != sizeof(void *)) { + WARN_ONCE(1, "unexpected fifo state"); + return NULL; + } + if (!iue) + return iue; + iue->target = target; + iue->flags = 0; + return
iue; +} + +void srp_iu_put(struct iu_entry *iue) +{ + kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue, + sizeof(void *), &iue->target->iu_queue.lock); +} + +static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md, + enum dma_data_direction dir, srp_rdma_t rdma_io, + int dma_map, int ext_desc) +{ + struct iu_entry *iue = NULL; + struct scatterlist *sg = NULL; + int err, nsg = 0, len; + + if (dma_map) { + iue = cmd->iue; + sg = cmd->se_cmd.t_data_sg; + nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents, + DMA_BIDIRECTIONAL); + if (!nsg) { + pr_err("fail to map %p %d\n", iue, + cmd->se_cmd.t_data_nents); + return 0; + } + len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len)); + } else { + len = be32_to_cpu(md->len); + } + + err = rdma_io(cmd, sg, nsg, md, 1, dir, len); + + if (dma_map) + dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); + + return err; +} + +static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd, + struct srp_indirect_buf *id, + enum dma_data_direction dir, srp_rdma_t rdma_io, + int dma_map, int ext_desc) +{ + struct iu_entry *iue = NULL; + struct srp_direct_buf *md = NULL; + struct scatterlist dummy, *sg = NULL; + dma_addr_t token = 0; + int err = 0; + int nmd, nsg = 0, len; + + if (dma_map || ext_desc) { + iue = cmd->iue; + sg = cmd->se_cmd.t_data_sg; + } + + nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf); + + if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) || + (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) { + md = &id->desc_list[0]; + goto rdma; + } + + if (ext_desc && dma_map) { + md = dma_alloc_coherent(iue->target->dev, + be32_to_cpu(id->table_desc.len), + &token, GFP_KERNEL); + if (!md) { + pr_err("Can't get dma memory %u\n", + be32_to_cpu(id->table_desc.len)); + return -ENOMEM; + } + + sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len)); + sg_dma_address(&dummy) = token; + sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len); + err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE, + be32_to_cpu(id->table_desc.len)); + if (err) { + pr_err("Error copying indirect table %d\n", err); + goto free_mem; + } + } else { + pr_err("This command uses external indirect buffer\n"); + return -EINVAL; + } + +rdma: + if (dma_map) { + nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents, + DMA_BIDIRECTIONAL); + if (!nsg) { + pr_err("fail to map %p %d\n", iue, + cmd->se_cmd.t_data_nents); + err = -EIO; + goto free_mem; + } + len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len)); + } else { + len = be32_to_cpu(id->len); + } + + err = rdma_io(cmd, sg, nsg, md, nmd, dir, len); + + if (dma_map) + dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); + +free_mem: + if (token && dma_map) { + dma_free_coherent(iue->target->dev, + be32_to_cpu(id->table_desc.len), md, token); + } + return err; +} + +static int data_out_desc_size(struct srp_cmd *cmd) +{ + int size = 0; + u8 fmt = cmd->buf_fmt >> 4; + + switch (fmt) { + case SRP_NO_DATA_DESC: + break; + case SRP_DATA_DESC_DIRECT: + size = sizeof(struct srp_direct_buf); + break; + case SRP_DATA_DESC_INDIRECT: + size = sizeof(struct srp_indirect_buf) + + sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt; + break; + default: + pr_err("client error. Invalid data_out_format %x\n", fmt); + break; + } + return size; +} + +/* + * TODO: this can be called multiple times for a single command if it + * has very long data. 
+ */ +int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd, + srp_rdma_t rdma_io, int dma_map, int ext_desc) +{ + struct srp_direct_buf *md; + struct srp_indirect_buf *id; + enum dma_data_direction dir; + int offset, err = 0; + u8 format; + + if (!cmd->se_cmd.t_data_nents) + return 0; + + offset = srp_cmd->add_cdb_len & ~3; + + dir = srp_cmd_direction(srp_cmd); + if (dir == DMA_FROM_DEVICE) + offset += data_out_desc_size(srp_cmd); + + if (dir == DMA_TO_DEVICE) + format = srp_cmd->buf_fmt >> 4; + else + format = srp_cmd->buf_fmt & ((1U << 4) - 1); + + switch (format) { + case SRP_NO_DATA_DESC: + break; + case SRP_DATA_DESC_DIRECT: + md = (struct srp_direct_buf *)(srp_cmd->add_data + offset); + err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc); + break; + case SRP_DATA_DESC_INDIRECT: + id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset); + err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map, + ext_desc); + break; + default: + pr_err("Unknown format %d %x\n", dir, format); + err = -EINVAL; + } + + return err; +} + +u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir) +{ + struct srp_direct_buf *md; + struct srp_indirect_buf *id; + u64 len = 0; + uint offset = cmd->add_cdb_len & ~3; + u8 fmt; + + if (dir == DMA_TO_DEVICE) { + fmt = cmd->buf_fmt >> 4; + } else { + fmt = cmd->buf_fmt & ((1U << 4) - 1); + offset += data_out_desc_size(cmd); + } + + switch (fmt) { + case SRP_NO_DATA_DESC: + break; + case SRP_DATA_DESC_DIRECT: + md = (struct srp_direct_buf *)(cmd->add_data + offset); + len = be32_to_cpu(md->len); + break; + case SRP_DATA_DESC_INDIRECT: + id = (struct srp_indirect_buf *)(cmd->add_data + offset); + len = be32_to_cpu(id->len); + break; + default: + pr_err("invalid data format %x\n", fmt); + break; + } + return len; +} + +int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir, + u64 *data_len) +{ + struct srp_indirect_buf *idb; + struct srp_direct_buf *db; + uint add_cdb_offset; + int rc; + + /* + * The pointer computations below will only be compiled correctly + * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check + * whether srp_cmd::add_data has been declared as a byte pointer. 
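+ * + * For illustration: a buf_fmt of 0x20 (high nibble SRP_DATA_DESC_INDIRECT + * for data-out, low nibble SRP_NO_DATA_DESC) decodes to *dir == DMA_TO_DEVICE + * with *data_len taken from the indirect descriptor's total length, while a + * buf_fmt of 0x01 decodes to *dir == DMA_FROM_DEVICE with *data_len read + * from a single direct descriptor.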
+ */ + BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) + && !__same_type(srp_cmd->add_data[0], (u8)0)); + + BUG_ON(!dir); + BUG_ON(!data_len); + + rc = 0; + *data_len = 0; + + *dir = DMA_NONE; + + if (srp_cmd->buf_fmt & 0xf) + *dir = DMA_FROM_DEVICE; + else if (srp_cmd->buf_fmt >> 4) + *dir = DMA_TO_DEVICE; + + add_cdb_offset = srp_cmd->add_cdb_len & ~3; + if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) || + ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) { + db = (struct srp_direct_buf *)(srp_cmd->add_data + + add_cdb_offset); + *data_len = be32_to_cpu(db->len); + } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) || + ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) { + idb = (struct srp_indirect_buf *)(srp_cmd->add_data + + add_cdb_offset); + + *data_len = be32_to_cpu(idb->len); + } + return rc; +} + +MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions"); +MODULE_AUTHOR("FUJITA Tomonori"); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h b/drivers/scsi/ibmvscsi_tgt/libsrp.h new file mode 100644 index 000000000..4696f3314 --- /dev/null +++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h @@ -0,0 +1,123 @@ +#ifndef __LIBSRP_H__ +#define __LIBSRP_H__ + +#include <linux/list.h> +#include <linux/kfifo.h> +#include <scsi/srp.h> + +enum srp_valid { + INVALIDATE_CMD_RESP_EL = 0, + VALID_CMD_RESP_EL = 0x80, + VALID_INIT_MSG = 0xC0, + VALID_TRANS_EVENT = 0xFF +}; + +enum srp_format { + SRP_FORMAT = 1, + MAD_FORMAT = 2, + OS400_FORMAT = 3, + AIX_FORMAT = 4, + LINUX_FORMAT = 5, + MESSAGE_IN_CRQ = 6 +}; + +enum srp_init_msg { + INIT_MSG = 1, + INIT_COMPLETE_MSG = 2 +}; + +enum srp_trans_event { + UNUSED_FORMAT = 0, + PARTNER_FAILED = 1, + PARTNER_DEREGISTER = 2, + MIGRATED = 6 +}; + +enum srp_status { + HEADER_DESCRIPTOR = 0xF1, + PING = 0xF5, + PING_RESPONSE = 0xF6 +}; + +enum srp_mad_version { + MAD_VERSION_1 = 1 +}; + +enum srp_os_type { + OS400 = 1, + LINUX = 2, + AIX = 3, + OFW = 4 +}; + +enum srp_task_attributes { + SRP_SIMPLE_TASK = 0, + SRP_HEAD_TASK = 1, + SRP_ORDERED_TASK = 2, + SRP_ACA_TASK = 4 +}; + +enum { + SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE = 0, + SRP_REQUEST_FIELDS_INVALID = 2, + SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED = 4, + SRP_TASK_MANAGEMENT_FUNCTION_FAILED = 5 +}; + +struct srp_buf { + dma_addr_t dma; + void *buf; +}; + +struct srp_queue { + void *pool; + void *items; + struct kfifo queue; + spinlock_t lock; +}; + +struct srp_target { + struct device *dev; + + spinlock_t lock; + struct list_head cmd_queue; + + size_t srp_iu_size; + struct srp_queue iu_queue; + size_t rx_ring_size; + struct srp_buf **rx_ring; + + void *ldata; +}; + +struct iu_entry { + struct srp_target *target; + + struct list_head ilist; + dma_addr_t remote_token; + unsigned long flags; + + struct srp_buf *sbuf; + u16 iu_len; +}; + +struct ibmvscsis_cmd; + +typedef int (srp_rdma_t)(struct ibmvscsis_cmd *, struct scatterlist *, int, + struct srp_direct_buf *, int, + enum dma_data_direction, unsigned int); +int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t); +void srp_target_free(struct srp_target *); +struct iu_entry *srp_iu_get(struct srp_target *); +void srp_iu_put(struct iu_entry *); +int srp_transfer_data(struct ibmvscsis_cmd *, struct srp_cmd *, + srp_rdma_t, int, int); +u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir); +int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir, + u64 *data_len); +static inline int srp_cmd_direction(struct srp_cmd *cmd) +{ + return (cmd->buf_fmt >> 4) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE; +} + +#endif diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index d6803a9e5..17d04c702 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -98,7 +98,7 @@ static unsigned int ipr_transop_timeout = 0; static unsigned int ipr_debug = 0; static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; static unsigned int ipr_dual_ioa_raid = 1; -static unsigned int ipr_number_of_msix = 2; +static unsigned int ipr_number_of_msix = 16; static unsigned int ipr_fast_reboot; static DEFINE_SPINLOCK(ipr_driver_lock); @@ -194,7 +194,8 @@ static const struct ipr_chip_t ipr_chip[] = { { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } }; static int ipr_max_bus_speeds[] = { @@ -221,7 +222,7 @@ module_param_named(max_devs, ipr_max_devs, int, 0); MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. " "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); module_param_named(number_of_msix, ipr_number_of_msix, int, 0); -MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)"); +MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)"); module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. 
(default: 0)"); MODULE_LICENSE("GPL"); @@ -3287,6 +3288,11 @@ static void ipr_worker_thread(struct work_struct *work) return; } + if (!ioa_cfg->scan_enabled) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + restart: do { did_work = 0; @@ -10213,6 +10219,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev, if (!ioa_cfg->reset_work_q) { dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); + rc = -ENOMEM; goto out_free_irq; } } else @@ -10361,6 +10368,7 @@ static void ipr_remove(struct pci_dev *pdev) static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct ipr_ioa_cfg *ioa_cfg; + unsigned long flags; int rc, i; rc = ipr_probe_ioa(pdev, dev_id); @@ -10402,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) __ipr_remove(pdev); return rc; } + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + ioa_cfg->scan_enabled = 1; + schedule_work(&ioa_cfg->work_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); - scsi_scan_host(ioa_cfg->host); ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { @@ -10413,7 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) } } - schedule_work(&ioa_cfg->work_q); + scsi_scan_host(ioa_cfg->host); + return 0; } @@ -10565,6 +10577,10 @@ static struct pci_device_id ipr_pci_table[] = { PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, ipr_pci_table); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 56c570683..cdb51960b 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -60,6 +60,7 @@ #define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D #define PCI_DEVICE_ID_IBM_CROCODILE 0x034A +#define PCI_DEVICE_ID_IBM_RATTLESNAKE 0x04DA #define IPR_SUBS_DEV_ID_2780 0x0264 #define IPR_SUBS_DEV_ID_5702 0x0266 @@ -111,6 +112,8 @@ #define IPR_SUBS_DEV_ID_2CCA 0x04C7 #define IPR_SUBS_DEV_ID_2CD2 0x04C8 #define IPR_SUBS_DEV_ID_2CCD 0x04C9 +#define IPR_SUBS_DEV_ID_580A 0x04FC +#define IPR_SUBS_DEV_ID_580B 0x04FB #define IPR_NAME "ipr" /* @@ -1475,6 +1478,7 @@ struct ipr_ioa_cfg { u8 in_ioa_bringdown:1; u8 ioa_unit_checked:1; u8 dump_taken:1; + u8 scan_enabled:1; u8 scan_done:1; u8 needs_hard_reset:1; u8 dual_raid:1; diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 30f9ef0c0..e72673b0a 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -908,9 +908,17 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) { struct fc_exch_pool *pool; struct fc_exch *ep = NULL; + u16 cpu = xid & fc_cpu_mask; + + if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { + printk_ratelimited(KERN_ERR + "libfc: lookup request for XID = %d, " + "indicates invalid CPU %d\n", xid, cpu); + return NULL; + } if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) { - pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); + pool = per_cpu_ptr(mp->pool, cpu); spin_lock_bh(&pool->lock); ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); if (ep) { diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e01a29863..04ce7cfb6 100644 --- 
a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -301,7 +301,6 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) { struct fc_host_statistics *fc_stats; struct fc_lport *lport = shost_priv(shost); - struct timespec v0, v1; unsigned int cpu; u64 fcp_in_bytes = 0; u64 fcp_out_bytes = 0; @@ -309,9 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) fc_stats = &lport->host_stats; memset(fc_stats, 0, sizeof(struct fc_host_statistics)); - jiffies_to_timespec(jiffies, &v0); - jiffies_to_timespec(lport->boot_time, &v1); - fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); + fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ; for_each_possible_cpu(cpu) { struct fc_stats *stats; @@ -2090,7 +2087,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) struct fc_rport *rport; struct fc_rport_priv *rdata; int rc = -EINVAL; - u32 did; + u32 did, tov; job->reply->reply_payload_rcv_len = 0; if (rsp) @@ -2121,15 +2118,20 @@ int fc_lport_bsg_request(struct fc_bsg_job *job) case FC_BSG_HST_CT: did = ntoh24(job->request->rqst_data.h_ct.port_id); - if (did == FC_FID_DIR_SERV) + if (did == FC_FID_DIR_SERV) { rdata = lport->dns_rdata; - else + if (!rdata) + break; + tov = rdata->e_d_tov; + } else { rdata = lport->tt.rport_lookup(lport, did); + if (!rdata) + break; + tov = rdata->e_d_tov; + kref_put(&rdata->kref, lport->tt.rport_destroy); + } - if (!rdata) - break; - - rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov); + rc = fc_lport_ct_request(job, lport, did, tov); break; case FC_BSG_HST_ELS_NOLOGIN: diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 589ff9aed..93f596182 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -95,17 +95,23 @@ static const char *fc_rport_state_names[] = { * @lport: The local port to look up the remote port on * @port_id: The remote port ID to look up * - * The caller must hold either disc_mutex or rcu_read_lock(). + * The reference count of the fc_rport_priv structure is + * increased by one.
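+ * + * A minimal sketch of the resulting caller contract (hypothetical caller; + * every successful lookup must now be balanced with a kref_put): + * + * rdata = lport->tt.rport_lookup(lport, port_id); + * if (rdata) { + * ... use rdata ... + * kref_put(&rdata->kref, lport->tt.rport_destroy); + * }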
*/ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, u32 port_id) { - struct fc_rport_priv *rdata; + struct fc_rport_priv *rdata = NULL, *tmp_rdata; - list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) - if (rdata->ids.port_id == port_id) - return rdata; - return NULL; + rcu_read_lock(); + list_for_each_entry_rcu(tmp_rdata, &lport->disc.rports, peers) + if (tmp_rdata->ids.port_id == port_id && + kref_get_unless_zero(&tmp_rdata->kref)) { + rdata = tmp_rdata; + break; + } + rcu_read_unlock(); + return rdata; } /** @@ -340,7 +346,6 @@ static void fc_rport_work(struct work_struct *work) fc_remote_port_delete(rport); } - mutex_lock(&lport->disc.disc_mutex); mutex_lock(&rdata->rp_mutex); if (rdata->rp_state == RPORT_ST_DELETE) { if (port_id == FC_FID_DIR_SERV) { @@ -370,7 +375,6 @@ static void fc_rport_work(struct work_struct *work) fc_rport_enter_ready(rdata); mutex_unlock(&rdata->rp_mutex); } - mutex_unlock(&lport->disc.disc_mutex); break; default: @@ -702,7 +706,7 @@ out: err: mutex_unlock(&rdata->rp_mutex); put: - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); + kref_put(&rdata->kref, lport->tt.rport_destroy); return; bad: FC_RPORT_DBG(rdata, "Bad FLOGI response\n"); @@ -762,8 +766,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n"); disc = &lport->disc; - mutex_lock(&disc->disc_mutex); - if (!lport->point_to_multipoint) { rjt_data.reason = ELS_RJT_UNSUP; rjt_data.explan = ELS_EXPL_NONE; @@ -808,7 +810,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_FIP; rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; - goto reject; + goto reject_put; case RPORT_ST_FLOGI: case RPORT_ST_PLOGI_WAIT: case RPORT_ST_PLOGI: @@ -825,13 +827,13 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_BUSY; rjt_data.explan = ELS_EXPL_NONE; - goto reject; + goto reject_put; } if (fc_rport_login_complete(rdata, fp)) { mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; - goto reject; + goto reject_put; } fp = fc_frame_alloc(lport, sizeof(*flp)); @@ -851,12 +853,13 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); out: mutex_unlock(&rdata->rp_mutex); - mutex_unlock(&disc->disc_mutex); + kref_put(&rdata->kref, lport->tt.rport_destroy); fc_frame_free(rx_fp); return; +reject_put: + kref_put(&rdata->kref, lport->tt.rport_destroy); reject: - mutex_unlock(&disc->disc_mutex); lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); fc_frame_free(rx_fp); } @@ -923,7 +926,7 @@ out: fc_frame_free(fp); err: mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); + kref_put(&rdata->kref, lport->tt.rport_destroy); } static bool @@ -1477,14 +1480,11 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) struct fc_rport_priv *rdata; struct fc_seq_els_data els_data; - mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp)); - if (!rdata) { - mutex_unlock(&lport->disc.disc_mutex); + if (!rdata) goto reject; - } + mutex_lock(&rdata->rp_mutex); - mutex_unlock(&lport->disc.disc_mutex); switch (rdata->rp_state) { case RPORT_ST_PRLI: @@ -1494,6 +1494,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) break; default: mutex_unlock(&rdata->rp_mutex); + 
kref_put(&rdata->kref, lport->tt.rport_destroy); goto reject; } @@ -1524,6 +1525,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) } mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); return; reject: @@ -1907,7 +1909,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) sid = fc_frame_sid(fp); - mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, sid); if (rdata) { mutex_lock(&rdata->rp_mutex); @@ -1916,10 +1917,10 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) fc_rport_enter_delete(rdata, RPORT_EV_LOGO); mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } else FC_RPORT_ID_DBG(lport, sid, "Received LOGO from non-logged-in port\n"); - mutex_unlock(&lport->disc.disc_mutex); fc_frame_free(fp); } diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 935c43095..763f012fd 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -233,15 +233,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->task_state_flags = SAS_TASK_STATE_PENDING; qc->lldd_task = task; - switch (qc->tf.protocol) { - case ATA_PROT_NCQ: - task->ata_task.use_ncq = 1; - /* fall through */ - case ATAPI_PROT_DMA: - case ATA_PROT_DMA: - task->ata_task.dma_xfer = 1; - break; - } + task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol); + task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol); if (qc->scsicmd) ASSIGN_SAS_TASK(qc->scsicmd, task); @@ -253,6 +246,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) if (qc->scsicmd) ASSIGN_SAS_TASK(qc->scsicmd, NULL); sas_free_task(task); + qc->lldd_task = NULL; ret = AC_ERR_SYSTEM; } diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index d5bd42059..b48485946 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -647,6 +647,7 @@ struct lpfc_hba { #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ #define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ #define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ +#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -694,7 +695,8 @@ struct lpfc_hba { uint8_t wwnn[8]; uint8_t wwpn[8]; uint32_t RandomData[7]; - uint32_t fcp_embed_io; + uint8_t fcp_embed_io; + uint8_t mds_diags_support; /* HBA Config Parameters */ uint32_t cfg_ack0; @@ -741,6 +743,7 @@ struct lpfc_hba { #define OAS_FIND_ANY_VPORT 0x01 #define OAS_FIND_ANY_TARGET 0x02 #define OAS_LUN_VALID 0x04 + uint32_t cfg_oas_priority; uint32_t cfg_XLanePriority; uint32_t cfg_enable_bg; uint32_t cfg_hostmem_hgp; @@ -751,6 +754,8 @@ struct lpfc_hba { uint32_t cfg_iocb_cnt; uint32_t cfg_suppress_link_up; uint32_t cfg_rrq_xri_bitmap_sz; + uint32_t cfg_delay_discovery; + uint32_t cfg_sli_mode; #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ @@ -759,6 +764,7 @@ struct lpfc_hba { #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? 
*/ uint32_t cfg_enable_SmartSAN; + uint32_t cfg_enable_mds_diags; lpfc_vpd_t vpd; /* vital product data */ struct pci_dev *pcidev; @@ -779,9 +785,9 @@ struct lpfc_hba { atomic_t fcp_qidx; /* next work queue to post work to */ - unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ - unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ - unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ + phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */ + phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */ + phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */ void __iomem *slim_memmap_p; /* Kernel memory mapped address for PCI BAR0 */ void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for @@ -827,6 +833,7 @@ struct lpfc_hba { struct timer_list fcp_poll_timer; struct timer_list eratt_poll; + uint32_t eratt_poll_interval; /* * stat counters @@ -999,6 +1006,18 @@ struct lpfc_hba { spinlock_t devicelock; /* lock for luns list */ mempool_t *device_data_mem_pool; struct list_head luns; +#define LPFC_TRANSGRESSION_HIGH_TEMPERATURE 0x0080 +#define LPFC_TRANSGRESSION_LOW_TEMPERATURE 0x0040 +#define LPFC_TRANSGRESSION_HIGH_VOLTAGE 0x0020 +#define LPFC_TRANSGRESSION_LOW_VOLTAGE 0x0010 +#define LPFC_TRANSGRESSION_HIGH_TXBIAS 0x0008 +#define LPFC_TRANSGRESSION_LOW_TXBIAS 0x0004 +#define LPFC_TRANSGRESSION_HIGH_TXPOWER 0x0002 +#define LPFC_TRANSGRESSION_LOW_TXPOWER 0x0001 +#define LPFC_TRANSGRESSION_HIGH_RXPOWER 0x8000 +#define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000 + uint16_t sfp_alarm; + uint16_t sfp_warning; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index cfec2eca4..f10199088 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -48,6 +48,7 @@ #include "lpfc_compat.h" #include "lpfc_crtn.h" #include "lpfc_vport.h" +#include "lpfc_attr.h" #define LPFC_DEF_DEVLOSS_TMO 30 #define LPFC_MIN_DEVLOSS_TMO 1 @@ -1620,6 +1621,11 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn); } +static inline bool lpfc_rangecheck(uint val, uint min, uint max) +{ + return val >= min && val <= max; +} + /** * lpfc_param_show - Return a cfg attribute value in decimal * @@ -1697,7 +1703,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ static int \ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ { \ - if (val >= minval && val <= maxval) {\ + if (lpfc_rangecheck(val, minval, maxval)) {\ phba->cfg_##attr = val;\ return 0;\ }\ @@ -1732,7 +1738,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \ static int \ lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ { \ - if (val >= minval && val <= maxval) {\ + if (lpfc_rangecheck(val, minval, maxval)) {\ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \ "3052 lpfc_" #attr " changed from %d to %d\n", \ phba->cfg_##attr, val); \ @@ -1856,7 +1862,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \ static int \ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ { \ - if (val >= minval && val <= maxval) {\ + if (lpfc_rangecheck(val, minval, maxval)) {\ vport->cfg_##attr = val;\ return 0;\ }\ @@ -1888,7 +1894,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \ static int \ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ - if (val >= minval && val <= maxval) {\ + if (lpfc_rangecheck(val, minval, maxval)) {\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ "3053 lpfc_" #attr \ " changed from %d 
(x%x) to %d (x%x)\n", \ @@ -1939,102 +1945,6 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ } -#define LPFC_ATTR(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_init(name, defval, minval, maxval) - -#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_show(name)\ -lpfc_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) - -#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_show(name)\ -lpfc_param_init(name, defval, minval, maxval)\ -lpfc_param_set(name, defval, minval, maxval)\ -lpfc_param_store(name)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ - lpfc_##name##_show, lpfc_##name##_store) - -#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_hex_show(name)\ -lpfc_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) - -#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_param_hex_show(name)\ -lpfc_param_init(name, defval, minval, maxval)\ -lpfc_param_set(name, defval, minval, maxval)\ -lpfc_param_store(name)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ - lpfc_##name##_show, lpfc_##name##_store) - -#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_init(name, defval, minval, maxval) - -#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) - -#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \ -static uint64_t lpfc_##name = defval;\ -module_param(lpfc_##name, ullong, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) - -#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -lpfc_vport_param_set(name, defval, minval, maxval)\ -lpfc_vport_param_store(name)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ - lpfc_##name##_show, lpfc_##name##_store) - -#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_hex_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO , 
lpfc_##name##_show, NULL) - -#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ -static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, S_IRUGO);\ -MODULE_PARM_DESC(lpfc_##name, desc);\ -lpfc_vport_param_hex_show(name)\ -lpfc_vport_param_init(name, defval, minval, maxval)\ -lpfc_vport_param_set(name, defval, minval, maxval)\ -lpfc_vport_param_store(name)\ -static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ - lpfc_##name##_show, lpfc_##name##_store) - static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); @@ -2400,6 +2310,69 @@ lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR, lpfc_oas_tgt_show, lpfc_oas_tgt_store); +/** + * lpfc_oas_priority_show - Return the priority used for LUNs enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string + **/ +static ssize_t +lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority); +} + +/** + * lpfc_oas_priority_store - Store the priority to use for LUNs enabled for + * Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid or the priority value is invalid + * -EPERM OAS is not supported by the HBA + * value of count on success + **/ +static ssize_t +lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + unsigned long val; + int ret; + + if (!phba->cfg_fof) + return -EPERM; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + ret = kstrtoul(buf, 0, &val); + if (ret || (val > 0x7f)) + return -EINVAL; + + if (val) + phba->cfg_oas_priority = (uint8_t)val; + else + phba->cfg_oas_priority = phba->cfg_XLanePriority; + return count; +} +static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR, + lpfc_oas_priority_show, lpfc_oas_priority_store); + /** + * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled + * for Optimized Access Storage (OAS) operations.
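A minimal user-space sketch of how the new lpfc_xlane_priority attribute might be exercised; the host instance "host0" and the value 0x20 are hypothetical. Writing a value from 1 to 0x7f sets an explicit OAS priority, writing 0 falls back to the adapter's cfg_XLanePriority default, and kstrtoul() with base 0 in the store handler accepts decimal or 0x-prefixed input.

#include <stdio.h>

int main(void)
{
	/* hypothetical lpfc host instance; pick the lpfc host on your system */
	FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_xlane_priority", "w");

	if (!f)
		return 1;
	/* a trailing newline is tolerated by the store handler */
	fprintf(f, "0x20\n");
	return fclose(f) ? 1 : 0;
}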
@@ -2462,6 +2435,7 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, else phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; phba->cfg_oas_flags &= ~OAS_LUN_VALID; + phba->cfg_oas_priority = phba->cfg_XLanePriority; phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; return count; } @@ -2524,7 +2498,6 @@ lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, return -EINVAL; phba->cfg_oas_lun_state = val; - return strlen(buf); } static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, @@ -2572,7 +2545,8 @@ static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, */ static size_t lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], - uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state) + uint8_t tgt_wwpn[], uint64_t lun, + uint32_t oas_state, uint8_t pri) { int rc = 0; @@ -2582,7 +2556,8 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], if (oas_state) { if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, - (struct lpfc_name *)tgt_wwpn, lun)) + (struct lpfc_name *)tgt_wwpn, + lun, pri)) rc = -ENOMEM; } else { lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, @@ -2648,13 +2623,13 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], static ssize_t lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], uint8_t tgt_wwpn[], uint64_t lun, - uint32_t oas_state) + uint32_t oas_state, uint8_t pri) { int rc; rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, - oas_state); + oas_state, pri); return rc; } @@ -2744,16 +2719,16 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, return -EINVAL; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3372 Try to set vport 0x%llx target 0x%llx lun:%lld " - "with oas set to %d\n", + "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx " + "priority 0x%x with oas state %d\n", wwn_to_u64(phba->cfg_oas_vpt_wwpn), wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, - phba->cfg_oas_lun_state); + phba->cfg_oas_priority, phba->cfg_oas_lun_state); rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, - phba->cfg_oas_tgt_wwpn, scsi_lun, - phba->cfg_oas_lun_state); - + phba->cfg_oas_tgt_wwpn, scsi_lun, + phba->cfg_oas_lun_state, + phba->cfg_oas_priority); if (rc) return rc; @@ -2772,19 +2747,14 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); -int lpfc_sli_mode = 0; -module_param(lpfc_sli_mode, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" - " 0 - auto (SLI-3 if supported)," - " 2 - select SLI-2 even on SLI-3 capable HBAs," - " 3 - select SLI-3"); +LPFC_ATTR(sli_mode, 0, 0, 3, + "SLI mode selector:" + " 0 - auto (SLI-3 if supported)," + " 2 - select SLI-2 even on SLI-3 capable HBAs," + " 3 - select SLI-3"); -int lpfc_enable_npiv = 1; -module_param(lpfc_enable_npiv, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); -lpfc_param_show(enable_npiv); -lpfc_param_init(enable_npiv, 1, 0, 1); -static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); +LPFC_ATTR_R(enable_npiv, 1, 0, 1, + "Enable NPIV functionality"); LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); @@ -4754,11 +4724,8 @@ MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); * accept and FCID/Fabric name/Fabric portname is changed. * Default value is 0. 
*/ -int lpfc_delay_discovery; -module_param(lpfc_delay_discovery, int, S_IRUGO); -MODULE_PARM_DESC(lpfc_delay_discovery, - "Delay NPort discovery when Clean Address bit is cleared. " - "Allowed values: 0,1."); +LPFC_ATTR(delay_discovery, 0, 0, 1, + "Delay NPort discovery when Clean Address bit is cleared."); /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count @@ -4780,6 +4747,14 @@ LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Protection Scatter Gather Segment Count"); +/* + * lpfc_enable_mds_diags: Enable MDS Diagnostics + * 0 = MDS Diagnostics disabled (default) + * 1 = MDS Diagnostics enabled + * Value range is [0,1]. Default value is 0. + */ +LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); + struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, &dev_attr_bg_guard_err, @@ -4857,6 +4832,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_xlane_vpt, &dev_attr_lpfc_xlane_lun_state, &dev_attr_lpfc_xlane_lun_status, + &dev_attr_lpfc_xlane_priority, &dev_attr_lpfc_sg_seg_cnt, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, @@ -4876,6 +4852,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_sriov_hw_max_virtfn, &dev_attr_protocol, &dev_attr_lpfc_xlane_supported, + &dev_attr_lpfc_enable_mds_diags, NULL, }; @@ -5849,6 +5826,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_oas_lun_state = 0; phba->cfg_oas_lun_status = 0; phba->cfg_oas_flags = 0; + phba->cfg_oas_priority = 0; lpfc_enable_bg_init(phba, lpfc_enable_bg); if (phba->sli_rev == LPFC_SLI_REV4) phba->cfg_poll = 0; @@ -5866,7 +5844,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); + lpfc_delay_discovery_init(phba, lpfc_delay_discovery); + lpfc_sli_mode_init(phba, lpfc_sli_mode); phba->cfg_enable_dss = 1; + lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags); return; } diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h new file mode 100644 index 000000000..b2bd28e96 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_attr.h @@ -0,0 +1,116 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +#define LPFC_ATTR(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_init(name, defval, minval, maxval) + +#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_set(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_hex_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_param_hex_show(name)\ +lpfc_param_init(name, defval, minval, maxval)\ +lpfc_param_set(name, defval, minval, maxval)\ +lpfc_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_init(name, defval, minval, maxval) + +#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \ +static uint64_t lpfc_##name = defval;\ +module_param(lpfc_##name, ullong, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +lpfc_vport_param_set(name, defval, minval, maxval)\ +lpfc_vport_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) + +#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_hex_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL) + +#define 
LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ +static uint lpfc_##name = defval;\ +module_param(lpfc_##name, uint, S_IRUGO);\ +MODULE_PARM_DESC(lpfc_##name, desc);\ +lpfc_vport_param_hex_show(name)\ +lpfc_vport_param_init(name, defval, minval, maxval)\ +lpfc_vport_param_set(name, defval, minval, maxval)\ +lpfc_vport_param_store(name)\ +static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ + lpfc_##name##_show, lpfc_##name##_store) diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 4e55b3518..bd7576d45 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -359,9 +359,6 @@ extern struct scsi_host_template lpfc_template_s3; extern struct scsi_host_template lpfc_vport_template; extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; -extern int lpfc_sli_mode; -extern int lpfc_enable_npiv; -extern int lpfc_delay_discovery; int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); @@ -492,7 +489,7 @@ struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *, struct lpfc_name *, struct lpfc_name *, uint64_t); bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *, - struct lpfc_name *, uint64_t); + struct lpfc_name *, uint64_t, uint8_t); bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *, struct lpfc_name *, uint64_t); bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index a38816e96..63e48d427 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1510,6 +1510,10 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport) if (!lpfc_is_link_up(phba)) return; + /* Must be connected to a Fabric */ + if (!(vport->fc_flag & FC_FABRIC)) + return; + if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc)) return; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 0498f5760..c0af32f24 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -594,6 +594,7 @@ static uint8_t lpfc_check_clean_addr_bit(struct lpfc_vport *vport, struct serv_parm *sp) { + struct lpfc_hba *phba = vport->phba; uint8_t fabric_param_changed = 0; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -615,7 +616,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport, * - lpfc_delay_discovery module parameter is set. 
*/ if (fabric_param_changed && !sp->cmn.clean_address_bit && - (vport->fc_prevDID || lpfc_delay_discovery)) { + (vport->fc_prevDID || phba->cfg_delay_discovery)) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_DELAYED; spin_unlock_irq(shost->host_lock); @@ -3299,6 +3300,12 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, FC_VPORT_FABRIC_REJ_WWN); } break; + case LSRJT_VENDOR_UNIQUE: + if ((stat.un.b.vendorUnique == 0x45) && + (cmd == ELS_CMD_FLOGI)) { + goto out_retry; + } + break; } break; @@ -3344,6 +3351,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((vport->load_flag & FC_UNLOADING) != 0) retry = 0; +out_retry: if (retry) { if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { /* Stop retrying PLOGI and FDISC if in FCF discovery */ @@ -4609,7 +4617,7 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) return sentplogi; } -void +uint32_t lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, uint32_t word0) { @@ -4617,9 +4625,11 @@ lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); desc->payload.els_req = word0; desc->length = cpu_to_be32(sizeof(desc->payload)); + + return sizeof(struct fc_rdp_link_service_desc); } -void +uint32_t lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, uint8_t *page_a0, uint8_t *page_a2) { @@ -4680,9 +4690,11 @@ lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, desc->sfp_info.flags = cpu_to_be16(flag); desc->length = cpu_to_be32(sizeof(desc->sfp_info)); + + return sizeof(struct fc_rdp_sfp_desc); } -void +uint32_t lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, READ_LNK_VAR *stat) { @@ -4707,134 +4719,181 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); desc->length = cpu_to_be32(sizeof(desc->info)); + + return sizeof(struct fc_rdp_link_error_status_desc); } -void +uint32_t lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, struct lpfc_vport *vport) { + uint32_t bbCredit; + desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); - desc->bbc_info.port_bbc = cpu_to_be32( - vport->fc_sparam.cmn.bbCreditMsb | - vport->fc_sparam.cmn.bbCreditlsb << 8); - if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) - desc->bbc_info.attached_port_bbc = cpu_to_be32( - vport->phba->fc_fabparam.cmn.bbCreditMsb | - vport->phba->fc_fabparam.cmn.bbCreditlsb << 8); - else + bbCredit = vport->fc_sparam.cmn.bbCreditLsb | + (vport->fc_sparam.cmn.bbCreditMsb << 8); + desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); + if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { + bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | + (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); + desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); + } else { desc->bbc_info.attached_port_bbc = 0; + } desc->bbc_info.rtt = 0; desc->length = cpu_to_be32(sizeof(desc->bbc_info)); + + return sizeof(struct fc_rdp_bbc_desc); } -void -lpfc_rdp_res_oed_temp_desc(struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) +uint32_t +lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_TEMP_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TEMP_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_TEMP_HIGH_WARNING]); - desc->oed_info.lo_warning = - 
cpu_to_be16(page_a2[SSF_TEMP_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) + flags |= RDP_OET_LOW_WARNING; + flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void -lpfc_rdp_res_oed_voltage_desc(struct fc_rdp_oed_sfp_desc *desc, +uint32_t +lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_WARNING]); - desc->oed_info.lo_warning = - cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) + flags |= RDP_OET_LOW_WARNING; + flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void -lpfc_rdp_res_oed_txbias_desc(struct fc_rdp_oed_sfp_desc *desc, +uint32_t +lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_BIAS_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_BIAS_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_BIAS_HIGH_WARNING]); - desc->oed_info.lo_warning = - cpu_to_be16(page_a2[SSF_BIAS_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) + flags |= RDP_OET_LOW_WARNING; + 
flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void -lpfc_rdp_res_oed_txpower_desc(struct fc_rdp_oed_sfp_desc *desc, +uint32_t +lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TXPOWER_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_WARNING]); - desc->oed_info.lo_warning = - cpu_to_be16(page_a2[SSF_TXPOWER_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) + flags |= RDP_OET_LOW_WARNING; + flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void -lpfc_rdp_res_oed_rxpower_desc(struct fc_rdp_oed_sfp_desc *desc, +uint32_t +lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, + struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) { - uint32_t flags; + uint32_t flags = 0; desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); - desc->oed_info.hi_alarm = - cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_ALARM]); - desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_RXPOWER_LOW_ALARM]); - desc->oed_info.hi_warning = - cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_WARNING]); - desc->oed_info.lo_warning = - cpu_to_be16(page_a2[SSF_RXPOWER_LOW_WARNING]); - flags = 0xf; /* All four are valid */ + desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; + desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; + desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; + desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; + + if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) + flags |= RDP_OET_HIGH_ALARM; + if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) + flags |= RDP_OET_LOW_ALARM; + if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) + flags |= RDP_OET_HIGH_WARNING; + if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) + flags |= RDP_OET_LOW_WARNING; + flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); desc->oed_info.function_flags = cpu_to_be32(flags); desc->length = cpu_to_be32(sizeof(desc->oed_info)); + return sizeof(struct fc_rdp_oed_sfp_desc); } -void +uint32_t lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, uint8_t *page_a0, struct lpfc_vport *vport) { @@ -4845,9 +4904,10 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2); memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); desc->length = cpu_to_be32(sizeof(desc->opd_info)); + return sizeof(struct fc_rdp_opd_sfp_desc); } -int +uint32_t lpfc_rdp_res_fec_desc(struct 
fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) { if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) @@ -4864,7 +4924,7 @@ lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) return sizeof(struct fc_fec_rdp_desc); } -void +uint32_t lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) { uint16_t rdp_cap = 0; @@ -4923,9 +4983,10 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); desc->length = cpu_to_be32(sizeof(desc->info)); + return sizeof(struct fc_rdp_port_speed_desc); } -void +uint32_t lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, struct lpfc_hba *phba) { @@ -4939,9 +5000,10 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, sizeof(desc->port_names.wwpn)); desc->length = cpu_to_be32(sizeof(desc->port_names)); + return sizeof(struct fc_rdp_port_name_desc); } -void +uint32_t lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { @@ -4962,6 +5024,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, } desc->length = cpu_to_be32(sizeof(desc->port_names)); + return sizeof(struct fc_rdp_port_name_desc); } void @@ -4976,8 +5039,9 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, uint8_t *pcmd; struct ls_rjt *stat; struct fc_rdp_res_frame *rdp_res; - uint32_t cmdsize; - int rc, fec_size; + uint32_t cmdsize, len; + uint16_t *flag_ptr; + int rc; if (status != SUCCESS) goto error; @@ -5008,39 +5072,61 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + /* Update Alarm and Warning */ + flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); + phba->sfp_alarm |= *flag_ptr; + flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); + phba->sfp_warning |= *flag_ptr; + /* For RDP payload */ - lpfc_rdp_res_link_service(&rdp_res->link_service_desc, ELS_CMD_RDP); + len = 8; + len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) + (len + pcmd), ELS_CMD_RDP); - lpfc_rdp_res_sfp_desc(&rdp_res->sfp_desc, + len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), rdp_context->page_a0, rdp_context->page_a2); - lpfc_rdp_res_speed(&rdp_res->portspeed_desc, phba); - lpfc_rdp_res_link_error(&rdp_res->link_error_desc, - &rdp_context->link_stat); - lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba); - lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc, - vport, ndlp); - lpfc_rdp_res_bbc_desc(&rdp_res->bbc_desc, &rdp_context->link_stat, - vport); - lpfc_rdp_res_oed_temp_desc(&rdp_res->oed_temp_desc, - rdp_context->page_a2); - lpfc_rdp_res_oed_voltage_desc(&rdp_res->oed_voltage_desc, - rdp_context->page_a2); - lpfc_rdp_res_oed_txbias_desc(&rdp_res->oed_txbias_desc, - rdp_context->page_a2); - lpfc_rdp_res_oed_txpower_desc(&rdp_res->oed_txpower_desc, - rdp_context->page_a2); - lpfc_rdp_res_oed_rxpower_desc(&rdp_res->oed_rxpower_desc, - rdp_context->page_a2); - lpfc_rdp_res_opd_desc(&rdp_res->opd_desc, rdp_context->page_a0, vport); - fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc, + len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), + phba); + len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) + (len + pcmd), &rdp_context->link_stat); + len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc 
*) + (len + pcmd), phba); + len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) + (len + pcmd), vport, ndlp); + len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), &rdp_context->link_stat); - rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE); + /* Check if nport is logged in, BZ190632 */ + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) + goto lpfc_skip_descriptor; + + len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), + &rdp_context->link_stat, vport); + len += lpfc_rdp_res_oed_temp_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_voltage_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_txbias_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_txpower_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_oed_rxpower_desc(phba, + (struct fc_rdp_oed_sfp_desc *)(len + pcmd), + rdp_context->page_a2); + len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), + rdp_context->page_a0, vport); + +lpfc_skip_descriptor: + rdp_res->length = cpu_to_be32(len - 8); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; /* Now that we know the true size of the payload, update the BPL */ bpl = (struct ulp_bde64 *) (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); - bpl->tus.f.bdeSize = (fec_size + RDP_DESC_PAYLOAD_SIZE + 8); + bpl->tus.f.bdeSize = len; bpl->tus.f.bdeFlags = 0; bpl->tus.w = le32_to_cpu(bpl->tus.w); @@ -5165,6 +5251,12 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, be32_to_cpu(rdp_req->nport_id_desc.nport_id), be32_to_cpu(rdp_req->nport_id_desc.length)); + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) && + !phba->cfg_enable_SmartSAN) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_expl = LSEXP_PORT_LOGIN_REQ; + goto error; + } if (sizeof(struct fc_rdp_nport_desc) != be32_to_cpu(rdp_req->rdp_des_length)) goto rjt_logerr; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 39f0fd000..822654322 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -346,7 +346,7 @@ struct csp { uint8_t fcphHigh; /* FC Word 0, byte 0 */ uint8_t fcphLow; uint8_t bbCreditMsb; - uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ + uint8_t bbCreditLsb; /* FC Word 0, byte 3 */ /* * Word 1 Bit 31 in common service parameter is overloaded.
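The payload assembly in lpfc_els_rdp_cmpl() above switches from fixed structure offsets to a running len cursor: every descriptor helper now returns its size, the optional descriptors are skipped when the nport is not logged in, and the ACC length is derived at the end as len - 8 (the ELS command word plus the length word). A stripped-down sketch of the pattern; the emit_* helpers below are hypothetical stand-ins for the lpfc_rdp_res_*() functions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins: each writes one descriptor at buf and
 * returns the number of bytes it consumed, like the reworked
 * lpfc_rdp_res_*() helpers. */
static uint32_t emit_fixed_desc(uint8_t *buf)
{
	memset(buf, 0xab, 12);
	return 12;
}

static uint32_t emit_optional_desc(uint8_t *buf)
{
	memset(buf, 0xcd, 20);
	return 20;
}

int main(void)
{
	uint8_t pcmd[256] = { 0 };
	int logged_in = 0;	/* models the NLP_RPI_REGISTERED check */
	uint32_t len = 8;	/* skip the ELS command and length words */

	len += emit_fixed_desc(len + pcmd);
	if (logged_in)
		len += emit_optional_desc(len + pcmd);

	/* What lpfc_els_rdp_cmpl() stores in rdp_res->length: the
	 * payload size minus the 8-byte ACC header; the BPL bdeSize
	 * gets the full frame length. */
	printf("payload length = %u, total frame = %u\n", len - 8, len);
	return 0;
}

This is also why the fixed RDP_DESC_PAYLOAD_SIZE macro could be removed from lpfc_hw.h: the response size is now whatever was actually emitted.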
@@ -1206,6 +1206,12 @@ struct fc_rdp_bbc_desc { struct fc_rdp_bbc_info bbc_info; }; +/* Optical Element Type Transgression Flags */ +#define RDP_OET_LOW_WARNING 0x1 +#define RDP_OET_HIGH_WARNING 0x2 +#define RDP_OET_LOW_ALARM 0x4 +#define RDP_OET_HIGH_ALARM 0x8 + #define RDP_OED_TEMPERATURE 0x1 #define RDP_OED_VOLTAGE 0x2 #define RDP_OED_TXBIAS 0x3 @@ -1233,8 +1239,8 @@ struct fc_rdp_opd_sfp_info { uint8_t vendor_name[16]; uint8_t model_number[16]; uint8_t serial_number[16]; - uint8_t reserved[2]; uint8_t revision[2]; + uint8_t reserved[2]; uint8_t date[8]; }; @@ -1261,27 +1267,17 @@ struct fc_rdp_res_frame { struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ - struct fc_rdp_bbc_desc bbc_desc; /* FC Word 34-38*/ - struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 39-43*/ - struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 44-48*/ - struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 49-53*/ - struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 54-58*/ - struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 59-63*/ - struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 64-80*/ - struct fc_fec_rdp_desc fec_desc; /* FC word 81-84*/ + struct fc_fec_rdp_desc fec_desc; /* FC word 34-37*/ + struct fc_rdp_bbc_desc bbc_desc; /* FC Word 38-42*/ + struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 43-47*/ + struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 48-52*/ + struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 53-57*/ + struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 58-62*/ + struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 63-67*/ + struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 68-84*/ }; -#define RDP_DESC_PAYLOAD_SIZE (sizeof(struct fc_rdp_link_service_desc) \ - + sizeof(struct fc_rdp_sfp_desc) \ - + sizeof(struct fc_rdp_port_speed_desc) \ - + sizeof(struct fc_rdp_link_error_status_desc) \ - + (sizeof(struct fc_rdp_port_name_desc) * 2) \ - + sizeof(struct fc_rdp_bbc_desc) \ - + (sizeof(struct fc_rdp_oed_sfp_desc) * 5) \ - + sizeof(struct fc_rdp_opd_sfp_desc)) - - /******** FDMI ********/ /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 0c7070bf2..ee8022737 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -544,6 +544,8 @@ struct lpfc_register { uint32_t word0; }; +#define LPFC_PORT_SEM_UE_RECOVERABLE 0xE000 +#define LPFC_PORT_SEM_MASK 0xF000 /* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. 
*/ #define LPFC_UERR_STATUS_HI 0x00A4 #define LPFC_UERR_STATUS_LO 0x00A0 @@ -937,6 +939,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_READ_OBJECT_LIST 0xAD #define LPFC_MBOX_OPCODE_DELETE_OBJECT 0xAE #define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 +#define LPFC_MBOX_OPCODE_SET_FEATURES 0xBF /* FCoE Opcodes */ #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 @@ -2590,10 +2593,8 @@ struct lpfc_mbx_memory_dump_type3 { #define SFF_RXPOWER_B1 104 #define SFF_RXPOWER_B0 105 #define SSF_STATUS_CONTROL 110 -#define SSF_ALARM_FLAGS_B1 112 -#define SSF_ALARM_FLAGS_B0 113 -#define SSF_WARNING_FLAGS_B1 116 -#define SSF_WARNING_FLAGS_B0 117 +#define SSF_ALARM_FLAGS 112 +#define SSF_WARNING_FLAGS 116 #define SSF_EXT_TATUS_CONTROL_B1 118 #define SSF_EXT_TATUS_CONTROL_B0 119 #define SSF_A2_VENDOR_SPECIFIC 120 @@ -2887,8 +2888,37 @@ struct lpfc_sli4_parameters { #define cfg_ext_embed_cb_SHIFT 0 #define cfg_ext_embed_cb_MASK 0x00000001 #define cfg_ext_embed_cb_WORD word19 +#define cfg_mds_diags_SHIFT 1 +#define cfg_mds_diags_MASK 0x00000001 +#define cfg_mds_diags_WORD word19 }; +#define LPFC_SET_UE_RECOVERY 0x10 +#define LPFC_SET_MDS_DIAGS 0x11 +struct lpfc_mbx_set_feature { + struct mbox_header header; + uint32_t feature; + uint32_t param_len; + uint32_t word6; +#define lpfc_mbx_set_feature_UER_SHIFT 0 +#define lpfc_mbx_set_feature_UER_MASK 0x00000001 +#define lpfc_mbx_set_feature_UER_WORD word6 +#define lpfc_mbx_set_feature_mds_SHIFT 0 +#define lpfc_mbx_set_feature_mds_MASK 0x00000001 +#define lpfc_mbx_set_feature_mds_WORD word6 +#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1 +#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001 +#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6 + uint32_t word7; +#define lpfc_mbx_set_feature_UERP_SHIFT 0 +#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff +#define lpfc_mbx_set_feature_UERP_WORD word7 +#define lpfc_mbx_set_feature_UESR_SHIFT 16 +#define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff +#define lpfc_mbx_set_feature_UESR_WORD word7 +}; + + struct lpfc_mbx_get_sli4_parameters { struct mbox_header header; struct lpfc_sli4_parameters sli4_parameters; @@ -3281,6 +3311,7 @@ struct lpfc_mqe { struct lpfc_mbx_get_prof_cfg get_prof_cfg; struct lpfc_mbx_wr_object wr_object; struct lpfc_mbx_get_port_name get_port_name; + struct lpfc_mbx_set_feature set_feature; struct lpfc_mbx_memory_dump_type3 mem_dump_type3; struct lpfc_mbx_nop nop; } un; @@ -3443,6 +3474,8 @@ struct lpfc_acqe_fc_la { #define LPFC_FC_LA_TYPE_LINK_UP 0x1 #define LPFC_FC_LA_TYPE_LINK_DOWN 0x2 #define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3 +#define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4 +#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5 #define lpfc_acqe_fc_la_port_type_SHIFT 6 #define lpfc_acqe_fc_la_port_type_MASK 0x00000003 #define lpfc_acqe_fc_la_port_type_WORD word0 diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h new file mode 100644 index 000000000..5733feafe --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -0,0 +1,122 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * Portions Copyright (C) 2004-2005 Christoph Hellwig * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. 
* + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +#include + +const struct pci_device_id lpfc_id_table[] = { + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, + 
PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, + PCI_ANY_ID, PCI_ANY_ID, }, + { 0 } +}; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6029c4839..b100a22b3 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -52,6 +52,7 @@ #include "lpfc_crtn.h" #include "lpfc_vport.h" #include "lpfc_version.h" +#include "lpfc_ids.h" char *_dump_buf_data; unsigned long _dump_buf_data_order; @@ -568,7 +569,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, - jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); + jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, @@ -1587,35 +1588,39 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, int rc; uint32_t intr_mode; - /* - * On error status condition, driver need to wait for port - * ready before performing reset. - */ - rc = lpfc_sli4_pdev_status_reg_wait(phba); - if (!rc) { - /* need reset: attempt for port recovery */ - if (en_rn_msg) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2887 Reset Needed: Attempting Port " - "Recovery...\n"); - lpfc_offline_prep(phba, mbx_action); - lpfc_offline(phba); - /* release interrupt for possible resource change */ - lpfc_sli4_disable_intr(phba); - lpfc_sli_brdrestart(phba); - /* request and enable interrupt */ - intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); - if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3175 Failed to enable interrupt\n"); - return -EIO; - } else { - phba->intr_mode = intr_mode; - } - rc = lpfc_online(phba); - if (rc == 0) - lpfc_unblock_mgmt_io(phba); + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_2) { + /* + * On error status condition, driver need to wait for port + * ready before performing reset. 
+ */ + rc = lpfc_sli4_pdev_status_reg_wait(phba); + if (rc) + return rc; } + + /* need reset: attempt for port recovery */ + if (en_rn_msg) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2887 Reset Needed: Attempting Port " + "Recovery...\n"); + lpfc_offline_prep(phba, mbx_action); + lpfc_offline(phba); + /* release interrupt for possible resource change */ + lpfc_sli4_disable_intr(phba); + lpfc_sli_brdrestart(phba); + /* request and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3175 Failed to enable interrupt\n"); + return -EIO; + } + phba->intr_mode = intr_mode; + rc = lpfc_online(phba); + if (rc == 0) + lpfc_unblock_mgmt_io(phba); + return rc; } @@ -1636,10 +1641,11 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) struct lpfc_register portstat_reg = {0}; uint32_t reg_err1, reg_err2; uint32_t uerrlo_reg, uemasklo_reg; - uint32_t pci_rd_rc1, pci_rd_rc2; + uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; bool en_rn_msg = true; struct temp_event temp_event_data; - int rc; + struct lpfc_register portsmphr_reg; + int rc, i; /* If the pci channel is offline, ignore possible errors, since * we cannot communicate with the pci card anyway. @@ -1647,6 +1653,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) if (pci_channel_offline(phba->pcidev)) return; + memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: @@ -1659,6 +1666,55 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) /* consider PCI bus read error as pci_channel_offline */ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) return; + if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { + lpfc_sli4_offline_eratt(phba); + return; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "7623 Checking UE recoverable"); + + for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { + if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0)) + continue; + + smphr_port_status = bf_get(lpfc_port_smphr_port_status, + &portsmphr_reg); + if ((smphr_port_status & LPFC_PORT_SEM_MASK) == + LPFC_PORT_SEM_UE_RECOVERABLE) + break; + /*Sleep for 1Sec, before checking SEMAPHORE */ + msleep(1000); + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "4827 smphr_port_status x%x : Waited %dSec", + smphr_port_status, i); + + /* Recoverable UE, reset the HBA device */ + if ((smphr_port_status & LPFC_PORT_SEM_MASK) == + LPFC_PORT_SEM_UE_RECOVERABLE) { + for (i = 0; i < 20; i++) { + msleep(1000); + if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0) && + (LPFC_POST_STAGE_PORT_READY == + bf_get(lpfc_port_smphr_port_status, + &portsmphr_reg))) { + rc = lpfc_sli4_port_sta_fn_reset(phba, + LPFC_MBX_NO_WAIT, en_rn_msg); + if (rc == 0) + return; + lpfc_printf_log(phba, + KERN_ERR, LOG_INIT, + "4215 Failed to recover UE"); + break; + } + } + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "7624 Firmware not ready: Failing UE recovery," + " waited %dSec", i); lpfc_sli4_offline_eratt(phba); break; @@ -1681,6 +1737,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) "taking port offline Data: x%x x%x\n", reg_err1, reg_err2); + phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; temp_event_data.event_code = LPFC_CRIT_TEMP; temp_event_data.data = 0xFFFFFFFF; @@ -3985,6 +4042,8 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) { struct lpfc_dmabuf *mp; 
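lpfc_handle_eratt_s4() above gains a recoverable-UE path: it polls the port semaphore register once per second, for up to ue_to_sr milliseconds, until the status field masked by LPFC_PORT_SEM_MASK reads LPFC_PORT_SEM_UE_RECOVERABLE, and only then waits for PORT_READY and attempts a port reset. The shape of that loop in a user-space sketch, with read_semaphore() standing in for lpfc_readl() on PSMPHRregaddr:

#include <stdint.h>
#include <stdio.h>

#define LPFC_PORT_SEM_UE_RECOVERABLE 0xE000
#define LPFC_PORT_SEM_MASK           0xF000

/* Stand-in for reading the SLI4 port semaphore register. */
static uint32_t read_semaphore(void)
{
	return 0xE234;	/* pretend firmware reports a recoverable UE */
}

int main(void)
{
	uint32_t ue_to_sr = 5000;	/* ms, from the SET_FEATURES reply */
	uint32_t status = 0;
	uint32_t i;

	/* Poll once per second until the semaphore reports recoverable */
	for (i = 0; i < ue_to_sr / 1000; i++) {
		status = read_semaphore();
		if ((status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE)
			break;
		/* the driver sleeps here via msleep(1000) */
	}

	if ((status & LPFC_PORT_SEM_MASK) == LPFC_PORT_SEM_UE_RECOVERABLE)
		printf("recoverable UE after %u polls: reset the port\n", i);
	else
		printf("not recoverable: take the port offline\n");
	return 0;
}

If the semaphore never shows the recoverable code, the driver falls through to lpfc_sli4_offline_eratt() exactly as before.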
LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + struct lpfc_mbx_read_top *la; int rc; if (bf_get(lpfc_trailer_type, acqe_fc) != @@ -4055,6 +4114,24 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; pmb->vport = phba->pport; + if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { + /* Parse and translate status field */ + mb = &pmb->u.mb; + mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, + (void *)acqe_fc); + + /* Parse and translate link attention fields */ + la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; + la->eventTag = acqe_fc->event_tag; + bf_set(lpfc_mbx_read_top_att_type, la, + LPFC_FC_LA_TYPE_LINK_DOWN); + + /* Invoke the mailbox command callback function */ + lpfc_mbx_cmpl_read_topology(phba, pmb); + + return; + } + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) goto out_free_dmabuf; @@ -4107,6 +4184,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) "3190 Over Temperature:%d Celsius- Port Name %c\n", acqe_sli->event_data1, port_name); + phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; shost = lpfc_shost_from_vport(phba->pport); fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(temp_event_data), @@ -4408,7 +4486,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * the corresponding FCF bit in the roundrobin bitmap. */ spin_lock_irq(&phba->hbalock); - if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && + (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { spin_unlock_irq(&phba->hbalock); /* Update FLOGI FCF failover eligible FCF bmask */ lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); @@ -4775,20 +4854,17 @@ static int lpfc_enable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; - int bars = 0; /* Obtain PCI device reference */ if (!phba->pcidev) goto out_error; else pdev = phba->pcidev; - /* Select PCI BARs */ - bars = pci_select_bars(pdev, IORESOURCE_MEM); /* Enable PCI device */ if (pci_enable_device_mem(pdev)) goto out_error; /* Request PCI resource for the device */ - if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) + if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) goto out_disable_device; /* Set up device as PCI master and save state for EEH */ pci_set_master(pdev); @@ -4805,7 +4881,7 @@ out_disable_device: pci_disable_device(pdev); out_error: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1401 Failed to enable pci device, bars:x%x\n", bars); + "1401 Failed to enable pci device\n"); return -ENODEV; } @@ -4820,17 +4896,14 @@ static void lpfc_disable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; - int bars; /* Obtain PCI device reference */ if (!phba->pcidev) return; else pdev = phba->pcidev; - /* Select PCI BARs */ - bars = pci_select_bars(pdev, IORESOURCE_MEM); /* Release PCI resource and disable PCI device */ - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(pdev); pci_disable_device(pdev); return; @@ -5363,6 +5436,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_bsmbx; } } + /* * Get sli4 parameters that override parameters from Port capabilities. 
* If this call fails, it isn't critical unless the SLI4 parameters come @@ -6091,6 +6165,7 @@ lpfc_hba_alloc(struct pci_dev *pdev) kfree(phba); return NULL; } + phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; spin_lock_init(&phba->ct_ev_lock); INIT_LIST_HEAD(&phba->ct_ev_waiters); @@ -9527,6 +9602,14 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->fcp_embed_io = 1; else phba->fcp_embed_io = 0; + + /* + * Check if the SLI port supports MDS Diagnostics + */ + if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) + phba->mds_diags_support = 1; + else + phba->mds_diags_support = 0; return 0; } @@ -9722,7 +9805,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) struct lpfc_vport **vports; struct lpfc_hba *phba = vport->phba; int i; - int bars = pci_select_bars(pdev, IORESOURCE_MEM); spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; @@ -9797,7 +9879,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) lpfc_hba_free(phba); - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(pdev); pci_disable_device(pdev); } @@ -11298,106 +11380,6 @@ lpfc_fof_queue_destroy(struct lpfc_hba *phba) return 0; } -static struct pci_device_id lpfc_id_table[] = { - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, 
PCI_DEVICE_ID_SAT_SMB, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, - PCI_ANY_ID, PCI_ANY_ID, }, - { 0 } -}; - MODULE_DEVICE_TABLE(pci, lpfc_id_table); static const struct pci_error_handlers lpfc_err_handler = { @@ -11452,21 +11434,17 @@ lpfc_init(void) printk(KERN_ERR "Could not register lpfcmgmt device, " "misc_register returned with status %d", error); - if (lpfc_enable_npiv) { - lpfc_transport_functions.vport_create = lpfc_vport_create; - lpfc_transport_functions.vport_delete = lpfc_vport_delete; - } + lpfc_transport_functions.vport_create = lpfc_vport_create; + lpfc_transport_functions.vport_delete = lpfc_vport_delete; lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); if (lpfc_transport_template == NULL) return -ENOMEM; - if (lpfc_enable_npiv) { - lpfc_vport_transport_template = - fc_attach_transport(&lpfc_vport_transport_functions); - if (lpfc_vport_transport_template == NULL) { - fc_release_transport(lpfc_transport_template); - return -ENOMEM; - } + lpfc_vport_transport_template = + fc_attach_transport(&lpfc_vport_transport_functions); + if (lpfc_vport_transport_template == NULL) { + fc_release_transport(lpfc_transport_template); + return -ENOMEM; } /* Initialize in case vector mapping is needed */ @@ -11478,8 +11456,7 @@ lpfc_init(void) error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); - if (lpfc_enable_npiv) - fc_release_transport(lpfc_vport_transport_template); + fc_release_transport(lpfc_vport_transport_template); } return error; @@ -11498,8 +11475,7 @@ lpfc_exit(void) misc_deregister(&lpfc_mgmt_dev); pci_unregister_driver(&lpfc_driver); fc_release_transport(lpfc_transport_template); - if (lpfc_enable_npiv) - fc_release_transport(lpfc_vport_transport_template); + fc_release_transport(lpfc_vport_transport_template); if (_dump_buf_data) { printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " "_dump_buf_data at 0x%p\n", diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c7e5695da..d197aa176 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux 
Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -3335,8 +3335,11 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * OAS, set the oas iocb related flags. */ if ((phba->cfg_fof) && ((struct lpfc_device_data *) - scsi_cmnd->device->hostdata)->oas_enabled) + scsi_cmnd->device->hostdata)->oas_enabled) { lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); + lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) + scsi_cmnd->device->hostdata)->priority; + } return 0; } @@ -5607,6 +5610,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, sizeof(struct lpfc_name)); lun_info->device_id.lun = lun; lun_info->oas_enabled = false; + lun_info->priority = phba->cfg_XLanePriority; lun_info->available = false; return lun_info; } @@ -5798,7 +5802,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, **/ bool lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, - struct lpfc_name *target_wwpn, uint64_t lun) + struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) { struct lpfc_device_data *lun_info; @@ -5825,6 +5829,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, false); if (lun_info) { lun_info->oas_enabled = true; + lun_info->priority = pri; lun_info->available = false; list_add_tail(&lun_info->listentry, &phba->luns); spin_unlock_irqrestore(&phba->devicelock, flags); @@ -5886,6 +5891,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, struct scsi_host_template lpfc_template_s3 = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, @@ -5910,6 +5916,7 @@ struct scsi_host_template lpfc_template_s3 = { struct scsi_host_template lpfc_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, @@ -5935,6 +5942,7 @@ struct scsi_host_template lpfc_template = { struct scsi_host_template lpfc_vport_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, + .proc_name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 18b9260cc..8cb80daba 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -51,6 +51,7 @@ struct lpfc_device_data { struct list_head listentry; struct lpfc_rport_data *rport_data; struct lpfc_device_id device_id; + uint8_t priority; bool oas_enabled; bool available; }; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 70edf21ae..7080ce292 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1323,21 +1323,18 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { lockdep_assert_held(&phba->hbalock); + BUG_ON(!piocb || !piocb->vport); + list_add_tail(&piocb->list, &pring->txcmplq); piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; if ((unlikely(pring->ringno == LPFC_ELS_RING)) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) && - (!(piocb->vport->load_flag & FC_UNLOADING))) { - if (!piocb->vport) - BUG(); - else - mod_timer(&piocb->vport->els_tmofunc, - jiffies + - msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); - } - + (!(piocb->vport->load_flag & FC_UNLOADING))) + mod_timer(&piocb->vport->els_tmofunc, + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); return 0; } @@ -2947,8 +2944,8 @@ void lpfc_poll_eratt(unsigned long ptr) else cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); - /* 64-bit integer division not supporte on 32-bit x86 - use do_div */ - do_div(cnt, LPFC_ERATT_POLL_INTERVAL); + /* 64-bit integer division not supported on 32-bit x86 - use do_div */ + do_div(cnt, phba->eratt_poll_interval); phba->sli.slistat.sli_ips = cnt; phba->sli.slistat.sli_prev_intr = sli_intr; @@ -2963,7 +2960,7 @@ void lpfc_poll_eratt(unsigned long ptr) /* Restart the timer for next eratt poll */ mod_timer(&phba->eratt_poll, jiffies + - msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); return; } @@ -4665,13 +4662,13 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) int mode = 3, i; int longs; - switch (lpfc_sli_mode) { + switch (phba->cfg_sli_mode) { case 2: if (phba->cfg_enable_npiv) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, - "1824 NPIV enabled: Override lpfc_sli_mode " + "1824 NPIV enabled: Override sli_mode " "parameter (%d) to auto (0).\n", - lpfc_sli_mode); + phba->cfg_sli_mode); break; } mode = 2; @@ -4681,8 +4678,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) break; default: lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, - "1819 Unrecognized lpfc_sli_mode " - "parameter: %d.\n", lpfc_sli_mode); + "1819 Unrecognized sli_mode parameter: %d.\n", + phba->cfg_sli_mode); break; } @@ -4690,12 +4687,14 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) rc = lpfc_sli_config_port(phba, mode); - if (rc && lpfc_sli_mode == 3) + if (rc && phba->cfg_sli_mode == 3) lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, "1820 Unable to select SLI-3. 
" "Not supported by adapter.\n"); if (rc && mode != 2) rc = lpfc_sli_config_port(phba, 2); + else if (rc && mode == 2) + rc = lpfc_sli_config_port(phba, 3); if (rc) goto lpfc_sli_hba_setup_error; @@ -5690,6 +5689,38 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) return rc; } +void +lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, + uint32_t feature) +{ + uint32_t len; + + len = sizeof(struct lpfc_mbx_set_feature) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_FEATURES, len, + LPFC_SLI4_MBX_EMBED); + + switch (feature) { + case LPFC_SET_UE_RECOVERY: + bf_set(lpfc_mbx_set_feature_UER, + &mbox->u.mqe.un.set_feature, 1); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; + mbox->u.mqe.un.set_feature.param_len = 8; + break; + case LPFC_SET_MDS_DIAGS: + bf_set(lpfc_mbx_set_feature_mds, + &mbox->u.mqe.un.set_feature, 1); + bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, + &mbox->u.mqe.un.set_feature, 0); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; + mbox->u.mqe.un.set_feature.param_len = 8; + break; + } + + return; +} + /** * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. * @phba: Pointer to HBA context object. @@ -6414,6 +6445,30 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->pport->cfg_lun_queue_depth = rc; } + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_0) { + lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc == MBX_SUCCESS) { + phba->hba_flag |= HBA_RECOVERABLE_UE; + /* Set 1Sec interval to detect UE */ + phba->eratt_poll_interval = 1; + phba->sli4_hba.ue_to_sr = bf_get( + lpfc_mbx_set_feature_UESR, + &mboxq->u.mqe.un.set_feature); + phba->sli4_hba.ue_to_rp = bf_get( + lpfc_mbx_set_feature_UERP, + &mboxq->u.mqe.un.set_feature); + } + } + + if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { + /* Enable MDS Diagnostics only if the SLI Port supports it */ + lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) + phba->mds_diags_support = 0; + } /* * Discover the port's supported feature set and match it against the @@ -6612,7 +6667,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, - jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); + jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); /* Enable PCIe device Advanced Error Reporting (AER) if configured */ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { @@ -8383,8 +8438,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); if (iocbq->iocb_flag & LPFC_IO_OAS) { bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); - if (phba->cfg_XLanePriority) { - bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + if (iocbq->priority) { + bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, + (iocbq->priority << 1)); + } else { bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, (phba->cfg_XLanePriority << 1)); } @@ -8439,8 +8497,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); if (iocbq->iocb_flag & LPFC_IO_OAS) { bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); - if (phba->cfg_XLanePriority) { - bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); + 
bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); + if (iocbq->priority) { + bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, + (iocbq->priority << 1)); + } else { bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, (phba->cfg_XLanePriority << 1)); } @@ -8494,8 +8555,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, iocbq->iocb.ulpFCP2Rcvy); if (iocbq->iocb_flag & LPFC_IO_OAS) { bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); - if (phba->cfg_XLanePriority) { - bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); + if (iocbq->priority) { + bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, + (iocbq->priority << 1)); + } else { bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, (phba->cfg_XLanePriority << 1)); } @@ -10136,6 +10200,7 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, struct lpfc_iocbq *iocbq; int sum, i; + spin_lock_irq(&phba->hbalock); for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; @@ -10143,6 +10208,7 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, ctx_cmd) == 0) sum++; } + spin_unlock_irq(&phba->hbalock); return sum; } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7fe99ff80..74227a28b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -57,6 +57,7 @@ struct lpfc_iocbq { struct lpfc_cq_event cq_event; IOCB_t iocb; /* IOCB cmd */ + uint8_t priority; /* OAS priority */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ uint32_t iocb_flag; #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index cd780c294..0b88b5703 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2015 Emulex. All rights reserved. * + * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -511,6 +511,8 @@ struct lpfc_sli4_hba { uint32_t ue_mask_lo; uint32_t ue_mask_hi; + uint32_t ue_to_sr; + uint32_t ue_to_rp; struct lpfc_register sli_intf; struct lpfc_pc_sli4_params pc_sli4_params; struct msix_entry *msix_entries; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index fa0d531bf..c9bf20eb7 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.1.0.0." +#define LPFC_DRIVER_VERSION "11.2.0.0." 
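With the new priority byte in struct lpfc_iocbq, the three WQE builders above (fcp_iwrite, fcp_iread, fcp_icmd) prefer the per-IO OAS priority carried from lun_info->priority and fall back to the module-wide cfg_XLanePriority only when it is zero; either value lands in the wqe_ccp field shifted left by one bit. The selection logic, in brief:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the fallback in lpfc_sli4_iocb2wqe(): a nonzero per-IO
 * priority wins, otherwise the global XLane priority applies.
 * Both are written to the WQE shifted left by one. */
static uint32_t wqe_ccp_value(uint8_t io_priority, uint8_t cfg_xlane_priority)
{
	if (io_priority)
		return (uint32_t)io_priority << 1;
	return (uint32_t)cfg_xlane_priority << 1;
}

int main(void)
{
	printf("per-IO set: ccp = %u\n", wqe_ccp_value(3, 5));	/* 6 */
	printf("fallback:   ccp = %u\n", wqe_ccp_value(0, 5));	/* 10 */
	return 0;
}

Note that wqe_ccpe is now set unconditionally for OAS commands, whereas before it was gated on cfg_XLanePriority being nonzero.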
#define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6bff13e7a..cd91a684c 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -4903,13 +4903,22 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) u16 ioc_status; u16 sz; u8 device_missing_delay; + u8 num_phys; - mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys); - if (!ioc->sas_hba.num_phys) { + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } + ioc->sas_hba.phy = kcalloc(num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!ioc->sas_hba.phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.num_phys = num_phys; /* sas_iounit page 0 */ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * @@ -4969,13 +4978,6 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; - ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys, - sizeof(struct _sas_phy), GFP_KERNEL); - if (!ioc->sas_hba.phy) { - pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", - ioc->name, __FILE__, __LINE__, __func__); - goto out; - } for (i = 0; i < ioc->sas_hba.num_phys ; i++) { if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, i))) { @@ -9033,8 +9035,11 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev) /* TODO - dump whatever for debugging purposes */ - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; + /* This called only if scsih_pci_error_detected returns + * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still + * works, no need to reset slot. 
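The _scsih_sas_host_add() hunk above is an ordering fix: the phy array is now allocated against a local copy of the phy count, and ioc->sas_hba.num_phys is published only after kcalloc() succeeds, so an allocation failure can no longer leave a nonzero count paired with a NULL array. The essential pattern, with calloc() standing in for kcalloc() in this user-space sketch:

#include <stdio.h>
#include <stdlib.h>

struct sas_hba {
	unsigned char num_phys;
	void *phy;
};

/* Publish num_phys only after the backing array exists, mirroring
 * the reordered _scsih_sas_host_add(). */
static int hba_add(struct sas_hba *hba, unsigned char num_phys)
{
	void *phy;

	if (!num_phys)
		return -1;
	phy = calloc(num_phys, 64);	/* stand-in for the _sas_phy array */
	if (!phy)
		return -1;	/* num_phys stays 0: state remains consistent */
	hba->phy = phy;
	hba->num_phys = num_phys;
	return 0;
}

int main(void)
{
	struct sas_hba hba = { 0 };

	if (hba_add(&hba, 8) == 0)
		printf("num_phys=%u, phy=%p\n", hba.num_phys, hba.phy);
	free(hba.phy);
	return 0;
}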
+ */ + return PCI_ERS_RESULT_RECOVERED; } /* diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 6a84b82d7..ff93286bc 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -705,6 +705,11 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, goto out_fail; } + if (!sas_node->parent_dev) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } port = sas_port_alloc_num(sas_node->parent_dev); if ((sas_port_add(port))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 3b11aad03..2f2a9910e 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -726,7 +726,7 @@ static int _osd_req_list_objects(struct osd_request *or, return PTR_ERR(bio); } - bio->bi_rw &= ~REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_READ, 0); or->in.bio = bio; or->in.total_bytes = bio->bi_iter.bi_size; return 0; @@ -824,7 +824,7 @@ void osd_req_write(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); WARN_ON(or->out.bio || or->out.total_bytes); - WARN_ON(0 == (bio->bi_rw & REQ_WRITE)); + WARN_ON(!op_is_write(bio_op(bio))); or->out.bio = bio; or->out.total_bytes = len; } @@ -839,7 +839,7 @@ int osd_req_write_kern(struct osd_request *or, if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */ + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); osd_req_write(or, obj, offset, bio, len); return 0; } @@ -875,7 +875,7 @@ void osd_req_read(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); WARN_ON(or->in.bio || or->in.total_bytes); - WARN_ON(bio->bi_rw & REQ_WRITE); + WARN_ON(op_is_write(bio_op(bio))); or->in.bio = bio; or->in.total_bytes = len; } @@ -956,7 +956,7 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key) if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); /* integrity check the continuation before the bio is linked * with the other data segments since the continuation @@ -1077,7 +1077,7 @@ int osd_req_write_sg_kern(struct osd_request *or, if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= REQ_WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); osd_req_write_sg(or, obj, bio, sglist, numentries); return 0; @@ -1558,18 +1558,25 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or, static struct request *_make_request(struct request_queue *q, bool has_write, struct _osd_io_info *oii, gfp_t flags) { - if (oii->bio) - return blk_make_request(q, oii->bio, flags); - else { - struct request *req; - - req = blk_get_request(q, has_write ? WRITE : READ, flags); - if (IS_ERR(req)) - return req; + struct request *req; + struct bio *bio = oii->bio; + int ret; - blk_rq_set_block_pc(req); + req = blk_get_request(q, has_write ? 
WRITE : READ, flags); + if (IS_ERR(req)) return req; + blk_rq_set_block_pc(req); + + for_each_bio(bio) { + struct bio *bounce_bio = bio; + + blk_queue_bounce(req->q, &bounce_bio); + ret = blk_rq_append_bio(req, bounce_bio); + if (ret) + return ERR_PTR(ret); } + + return req; } static int _init_blk_request(struct osd_request *or, diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 6bd7bf4f4..9fc675f57 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1249,7 +1249,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) /* Chip documentation for the 8070 and 8072 SPCv */ /* states that a 500ms minimum delay is required */ - /* before issuing commands. Otherwise, the firmare */ + /* before issuing commands. Otherwise, the firmware */ /* will enter an unrecoverable state. */ if (pm8001_ha->chip_id == chip_8070 || diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 4dc06a13c..fe7469c90 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -146,92 +146,6 @@ static struct bin_attribute sysfs_fw_dump_attr = { .write = qla2x00_sysfs_write_fw_dump, }; -static ssize_t -qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) -{ - struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, - struct device, kobj))); - struct qla_hw_data *ha = vha->hw; - - if (!ha->fw_dump_template || !ha->fw_dump_template_len) - return 0; - - ql_dbg(ql_dbg_user, vha, 0x70e2, - "chunk <- off=%llx count=%zx\n", off, count); - return memory_read_from_buffer(buf, count, &off, - ha->fw_dump_template, ha->fw_dump_template_len); -} - -static ssize_t -qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) -{ - struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, - struct device, kobj))); - struct qla_hw_data *ha = vha->hw; - uint32_t size; - - if (off == 0) { - if (ha->fw_dump) - vfree(ha->fw_dump); - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); - - ha->fw_dump = NULL; - ha->fw_dump_len = 0; - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; - - size = qla27xx_fwdt_template_size(buf); - ql_dbg(ql_dbg_user, vha, 0x70d1, - "-> allocating fwdt (%x bytes)...\n", size); - ha->fw_dump_template = vmalloc(size); - if (!ha->fw_dump_template) { - ql_log(ql_log_warn, vha, 0x70d2, - "Failed allocate fwdt (%x bytes).\n", size); - return -ENOMEM; - } - ha->fw_dump_template_len = size; - } - - if (off + count > ha->fw_dump_template_len) { - count = ha->fw_dump_template_len - off; - ql_dbg(ql_dbg_user, vha, 0x70d3, - "chunk -> truncating to %zx bytes.\n", count); - } - - ql_dbg(ql_dbg_user, vha, 0x70d4, - "chunk -> off=%llx count=%zx\n", off, count); - memcpy(ha->fw_dump_template + off, buf, count); - - if (off + count == ha->fw_dump_template_len) { - size = qla27xx_fwdt_calculate_dump_size(vha); - ql_dbg(ql_dbg_user, vha, 0x70d5, - "-> allocating fwdump (%x bytes)...\n", size); - ha->fw_dump = vmalloc(size); - if (!ha->fw_dump) { - ql_log(ql_log_warn, vha, 0x70d6, - "Failed allocate fwdump (%x bytes).\n", size); - return -ENOMEM; - } - ha->fw_dump_len = size; - } - - return count; -} -static struct bin_attribute sysfs_fw_dump_template_attr = { - .attr = { - .name = "fw_dump_template", - .mode = S_IRUSR | S_IWUSR, - }, - .size = 0, - .read = 
qla2x00_sysfs_read_fw_dump_template, - .write = qla2x00_sysfs_write_fw_dump_template, -}; - static ssize_t qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -973,7 +887,6 @@ static struct sysfs_entry { int is4GBp_only; } bin_file_entries[] = { { "fw_dump", &sysfs_fw_dump_attr, }, - { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 }, { "nvram", &sysfs_nvram_attr, }, { "optrom", &sysfs_optrom_attr, }, { "optrom_ctl", &sysfs_optrom_ctl_attr, }, @@ -1000,8 +913,6 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) continue; - if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) - continue; ret = sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); @@ -1858,6 +1769,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) if (!fcport) return; + if (test_bit(UNLOADING, &fcport->vha->dpc_flags)) + return; + if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) return; @@ -1900,10 +1814,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) int rval; struct link_statistics *stats; dma_addr_t stats_dma; - struct fc_host_statistics *pfc_host_stat; + struct fc_host_statistics *p = &vha->fc_host_stat; - pfc_host_stat = &vha->fc_host_stat; - memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); + memset(p, -1, sizeof(*p)); if (IS_QLAFX00(vha->hw)) goto done; @@ -1918,17 +1831,17 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) goto done; stats = dma_alloc_coherent(&ha->pdev->dev, - sizeof(struct link_statistics), &stats_dma, GFP_KERNEL); - if (stats == NULL) { + sizeof(*stats), &stats_dma, GFP_KERNEL); + if (!stats) { ql_log(ql_log_warn, vha, 0x707d, "Failed to allocate memory for stats.\n"); goto done; } - memset(stats, 0, DMA_POOL_SIZE); + memset(stats, 0, sizeof(*stats)); rval = QLA_FUNCTION_FAILED; if (IS_FWI2_CAPABLE(ha)) { - rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0); } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && !ha->dpc_active) { /* Must be in a 'READY' state for statistics retrieval. 
*/ @@ -1939,47 +1852,68 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) if (rval != QLA_SUCCESS) goto done_free; - pfc_host_stat->link_failure_count = stats->link_fail_cnt; - pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt; - pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt; - pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt; - pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt; - pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt; + p->link_failure_count = stats->link_fail_cnt; + p->loss_of_sync_count = stats->loss_sync_cnt; + p->loss_of_signal_count = stats->loss_sig_cnt; + p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt; + p->invalid_tx_word_count = stats->inval_xmit_word_cnt; + p->invalid_crc_count = stats->inval_crc_cnt; if (IS_FWI2_CAPABLE(ha)) { - pfc_host_stat->lip_count = stats->lip_cnt; - pfc_host_stat->tx_frames = stats->tx_frames; - pfc_host_stat->rx_frames = stats->rx_frames; - pfc_host_stat->dumped_frames = stats->discarded_frames; - pfc_host_stat->nos_count = stats->nos_rcvd; - pfc_host_stat->error_frames = + p->lip_count = stats->lip_cnt; + p->tx_frames = stats->tx_frames; + p->rx_frames = stats->rx_frames; + p->dumped_frames = stats->discarded_frames; + p->nos_count = stats->nos_rcvd; + p->error_frames = stats->dropped_frames + stats->discarded_frames; - pfc_host_stat->rx_words = vha->qla_stats.input_bytes; - pfc_host_stat->tx_words = vha->qla_stats.output_bytes; + p->rx_words = vha->qla_stats.input_bytes; + p->tx_words = vha->qla_stats.output_bytes; } - pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests; - pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests; - pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests; - pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; - pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; - pfc_host_stat->seconds_since_last_reset = + p->fcp_control_requests = vha->qla_stats.control_requests; + p->fcp_input_requests = vha->qla_stats.input_requests; + p->fcp_output_requests = vha->qla_stats.output_requests; + p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; + p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; + p->seconds_since_last_reset = get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset; - do_div(pfc_host_stat->seconds_since_last_reset, HZ); + do_div(p->seconds_since_last_reset, HZ); done_free: dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), stats, stats_dma); done: - return pfc_host_stat; + return p; } static void qla2x00_reset_host_stats(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + struct link_statistics *stats; + dma_addr_t stats_dma; + memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); + + if (IS_FWI2_CAPABLE(ha)) { + stats = dma_alloc_coherent(&ha->pdev->dev, + sizeof(*stats), &stats_dma, GFP_KERNEL); + if (!stats) { + ql_log(ql_log_warn, vha, 0x70d7, + "Failed to allocate memory for stats.\n"); + return; + } + + /* reset firmware statistics */ + qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); + + dma_free_coherent(&ha->pdev->dev, sizeof(*stats), + stats, stats_dma); + } } static void diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 392c147d5..643014f82 
100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -2246,53 +2246,94 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); struct link_statistics *stats = NULL; dma_addr_t stats_dma; - int rval = QLA_FUNCTION_FAILED; + int rval; + uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd; + uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0; if (test_bit(UNLOADING, &vha->dpc_flags)) - goto done; + return -ENODEV; if (unlikely(pci_channel_offline(ha->pdev))) - goto done; + return -ENODEV; if (qla2x00_reset_active(vha)) - goto done; + return -EBUSY; if (!IS_FWI2_CAPABLE(ha)) - goto done; + return -EPERM; stats = dma_alloc_coherent(&ha->pdev->dev, - sizeof(struct link_statistics), &stats_dma, GFP_KERNEL); + sizeof(*stats), &stats_dma, GFP_KERNEL); if (!stats) { ql_log(ql_log_warn, vha, 0x70e2, - "Failed to allocate memory for stats.\n"); - goto done; + "Failed to allocate memory for stats.\n"); + return -ENOMEM; } - memset(stats, 0, sizeof(struct link_statistics)); + memset(stats, 0, sizeof(*stats)); - rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); + rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options); - if (rval != QLA_SUCCESS) - goto done_free; + if (rval == QLA_SUCCESS) { + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3, + (uint8_t *)stats, sizeof(*stats)); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); + } - ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3, - (uint8_t *)stats, sizeof(struct link_statistics)); + bsg_job->reply->reply_payload_rcv_len = sizeof(*stats); + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; - sg_copy_from_buffer(bsg_job->reply_payload.sg_list, - bsg_job->reply_payload.sg_cnt, stats, sizeof(struct link_statistics)); - bsg_job->reply->reply_payload_rcv_len = sizeof(struct link_statistics); + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); - bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + dma_free_coherent(&ha->pdev->dev, sizeof(*stats), + stats, stats_dma); - bsg_job->reply_len = sizeof(struct fc_bsg_reply); + return 0; +} + +static int +qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + int rval; + struct qla_dport_diag *dd; + + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) + return -EPERM; + + dd = kmalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) { + ql_log(ql_log_warn, vha, 0x70db, + "Failed to allocate memory for dport.\n"); + return -ENOMEM; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); + + rval = qla26xx_dport_diagnostics( + vha, dd->buf, sizeof(dd->buf), dd->options); + if (rval == QLA_SUCCESS) { + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); + } + + bsg_job->reply->reply_payload_rcv_len = sizeof(*dd); + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? 
EXT_STATUS_MAILBOX : EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(*bsg_job->reply); bsg_job->reply->result = DID_OK << 16; bsg_job->job_done(bsg_job); -done_free: - dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics), - stats, stats_dma); -done: - return rval; + kfree(dd); + + return 0; } static int @@ -2360,8 +2401,12 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) return qla27xx_get_bbcr_data(bsg_job); case QL_VND_GET_PRIV_STATS: + case QL_VND_GET_PRIV_STATS_EX: return qla2x00_get_priv_stats(bsg_job); + case QL_VND_DPORT_DIAGNOSTICS: + return qla2x00_do_dport_diagnostics(bsg_job); + default: return -ENOSYS; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index c80192d45..d97dfd521 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -29,6 +29,8 @@ #define QL_VND_SET_FLASH_UPDATE_CAPS 0x16 #define QL_VND_GET_BBCR_DATA 0x17 #define QL_VND_GET_PRIV_STATS 0x18 +#define QL_VND_DPORT_DIAGNOSTICS 0x19 +#define QL_VND_GET_PRIV_STATS_EX 0x1A /* BSG Vendor specific subcode returns */ #define EXT_STATUS_OK 0 @@ -266,4 +268,15 @@ struct qla_bbcr_data { uint16_t mbx1; /* Port state */ uint8_t reserved[9]; } __packed; + +struct qla_dport_diag { + uint16_t options; + uint32_t buf[16]; + uint8_t unused[62]; +} __packed; + +/* D_Port options */ +#define QLA_DPORT_RESULT 0x0 +#define QLA_DPORT_START 0x2 + #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index b64c504ff..45af34ddc 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -11,12 +11,11 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x018f | 0x0146 | + * | Module Init and Probe | 0x0191 | 0x0146 | * | | | 0x015b-0x0160 | - * | | | 0x016e-0x0170 | - * | Mailbox commands | 0x1192 | | - * | | | | - * | Device Discovery | 0x2016 | 0x2020-0x2022, | + * | | | 0x016e | + * | Mailbox commands | 0x1199 | 0x1193 | + * | Device Discovery | 0x2004 | 0x2016 | * | | | 0x2011-0x2012, | * | | | 0x2099-0x20a4 | * | Queue Command and IO tracing | 0x3074 | 0x300b | @@ -26,11 +25,11 @@ * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | - * | Async Events | 0x5089 | 0x502b-0x502f | - * | | | 0x505e | + * | Async Events | 0x5090 | 0x502b-0x502f | + * | | | 0x5047 | * | | | 0x5084,0x5075 | * | | | 0x503d,0x5044 | - * | | | 0x507b,0x505f | + * | | | 0x505f | * | Timer Routines | 0x6012 | | * | User Space Interactions | 0x70e3 | 0x7018,0x702e | * | | | 0x7020,0x7024 | @@ -39,9 +38,9 @@ * | | | 0x70a5-0x70a6 | * | | | 0x70a8,0x70ab | * | | | 0x70ad-0x70ae | + * | | | 0x70d0-0x70d6 | * | | | 0x70d7-0x70db | - * | | | 0x70de-0x70df | - * | Task Management | 0x803d | 0x8000,0x800b | + * | Task Management | 0x8042 | 0x8000,0x800b | * | | | 0x8019 | * | | | 0x8025,0x8026 | * | | | 0x8031,0x8032 | @@ -2697,29 +2696,24 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) void ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id, - uint8_t *b, uint32_t size) + uint8_t *buf, uint size) { - uint32_t cnt; - uint8_t c; + uint cnt; if (!ql_mask_match(level)) return; - ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 " - "9 Ah Bh Ch Dh Eh Fh\n"); - ql_dbg(level, vha, id, "----------------------------------" - "----------------------------\n"); - - ql_dbg(level, vha, id, " "); - for (cnt = 0; cnt < size;) { - c = 
*b++; - printk("%02x", (uint32_t) c); - cnt++; - if (!(cnt % 16)) + ql_dbg(level, vha, id, + "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); + ql_dbg(level, vha, id, + "----- -----------------------------------------------\n"); + for (cnt = 0; cnt < size; cnt++, buf++) { + if (cnt % 16 == 0) + ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU); + printk(" %02x", *buf); + if (cnt % 16 == 15) printk("\n"); - else - printk(" "); } - if (cnt % 16) - ql_dbg(level, vha, id, "\n"); + if (cnt % 16 != 0) + printk("\n"); } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 47f8b9b49..ae4a74756 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -829,6 +829,7 @@ struct mbx_cmd_32 { #define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */ #define MBA_INIT_REQUIRED 0x8061 /* Initialization required */ #define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */ +#define MBA_TEMPERATURE_ALERT 0x8070 /* Temperature Alert */ #define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */ #define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */ #define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change @@ -3028,6 +3029,7 @@ struct qla_hw_data { uint32_t mr_reset_hdlr_active:1; uint32_t mr_intr_valid:1; + uint32_t dport_enabled:1; uint32_t fawwpn_enabled:1; uint32_t exlogins_enabled:1; uint32_t exchoffld_enabled:1; @@ -3128,7 +3130,7 @@ struct qla_hw_data { #define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271 #define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261 - uint32_t device_type; + uint32_t isp_type; #define DT_ISP2100 BIT_0 #define DT_ISP2200 BIT_1 #define DT_ISP2300 BIT_2 @@ -3153,6 +3155,7 @@ struct qla_hw_data { #define DT_ISP2261 BIT_21 #define DT_ISP_LAST (DT_ISP2261 << 1) + uint32_t device_type; #define DT_T10_PI BIT_25 #define DT_IIDMA BIT_26 #define DT_FWI2 BIT_27 @@ -3160,7 +3163,8 @@ struct qla_hw_data { #define DT_OEM_001 BIT_29 #define DT_ISP2200A BIT_30 #define DT_EXTENDED_IDS BIT_31 -#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1)) + +#define DT_MASK(ha) ((ha)->isp_type & (DT_ISP_LAST - 1)) #define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100) #define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200) #define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300) @@ -3370,6 +3374,8 @@ struct qla_hw_data { uint32_t fw_shared_ram_start; uint32_t fw_shared_ram_end; + uint32_t fw_ddr_ram_start; + uint32_t fw_ddr_ram_end; uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ uint8_t fw_seriallink_options[4]; @@ -3505,7 +3511,6 @@ struct qla_hw_data { int cur_vport_count; struct qla_chip_state_84xx *cs84xx; - struct qla_statistics qla_stats; struct isp_operations *isp_ops; struct workqueue_struct *wq; struct qlfc_fw fw_buf; @@ -3656,6 +3661,7 @@ typedef struct scsi_qla_host { #define PFLG_DISCONNECTED 0 /* PCI device removed */ #define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */ #define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */ +#define PCI_ERR 30 uint32_t device_flags; #define SWITCH_FOUND BIT_0 diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 4c0f3a774..8a2368b32 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1288,7 +1288,7 @@ struct vp_rpt_id_entry_24xx { uint8_t vp_idx_map[16]; - uint8_t reserved_4[28]; + uint8_t reserved_4[24]; uint16_t bbcr; uint8_t reserved_5[6]; }; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index fe943772f..6ca00813c 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -344,7 
+344,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, extern int qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, - dma_addr_t); + dma_addr_t, uint); extern int qla24xx_abort_command(srb_t *); extern int qla24xx_async_abort_command(srb_t *); @@ -445,6 +445,9 @@ qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *); extern int qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); +extern int +qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); + /* * Global Function Prototypes in qla_isr.c source file. */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index c56cdb35f..5b09296b4 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -624,6 +624,9 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; + memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); + memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); + /* Clear adapter flags. */ vha->flags.online = 0; ha->flags.chip_reset_done = 0; @@ -2053,6 +2056,14 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha) if (IS_QLA6312(ha)) ha->fw_options[2] |= BIT_13; + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2100, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + /* Update firmware options. */ qla2x00_set_fw_options(vha, ha->fw_options); } @@ -2070,6 +2081,14 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) if (ql2xfwholdabts) ha->fw_options[3] |= BIT_12; + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2101, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) return; @@ -2269,13 +2288,13 @@ qla2x00_init_rings(scsi_qla_host_t *vha) mid_init_cb->options = cpu_to_le16(BIT_1); mid_init_cb->init_cb.execution_throttle = cpu_to_le16(ha->cur_fw_xcb_count); - /* D-Port Status */ - if (IS_DPORT_CAPABLE(ha)) - mid_init_cb->init_cb.firmware_options_1 |= - cpu_to_le16(BIT_7); - /* Enable FA-WWPN */ + ha->flags.dport_enabled = + (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0; + ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n", + (ha->flags.dport_enabled) ? "enabled" : "disabled"); + /* FA-WWPN Status */ ha->flags.fawwpn_enabled = - (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0; + (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0; ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n", (ha->flags.fawwpn_enabled) ? 
"enabled" : "disabled"); } @@ -6513,6 +6532,14 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) if (ql2xfwholdabts) ha->fw_options[3] |= BIT_12; + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2103, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + if (!ql2xetsenable) goto out; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index a92a62dea..987f1c729 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -710,16 +710,23 @@ skip_rio: case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ ql_log(ql_log_warn, vha, 0x5007, - "ISP Response Transfer Error.\n"); + "ISP Response Transfer Error (%x).\n", mb[1]); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ ql_dbg(ql_dbg_async, vha, 0x5008, - "Asynchronous WAKEUP_THRES.\n"); + "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); + break; + case MBA_LOOP_INIT_ERR: + ql_log(ql_log_warn, vha, 0x5090, + "LOOP INIT ERROR (%x).\n", mb[1]); + ha->isp_ops->fw_dump(vha, 1); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; + case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ ql_dbg(ql_dbg_async, vha, 0x5009, "LIP occurred (%x).\n", mb[1]); @@ -1152,12 +1159,20 @@ global_port_update: case MBA_DPORT_DIAGNOSTICS: ql_dbg(ql_dbg_async, vha, 0x5052, - "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1], + "D-Port Diagnostics: %04x result=%s\n", + mb[0], mb[1] == 0 ? "start" : - mb[1] == 1 ? "done (ok)" : + mb[1] == 1 ? "done (pass)" : mb[1] == 2 ? "done (error)" : "other"); break; + case MBA_TEMPERATURE_ALERT: + ql_dbg(ql_dbg_async, vha, 0x505e, + "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); + if (mb[1] == 0x12) + schedule_work(&ha->board_disable); + break; + default: ql_dbg(ql_dbg_async, vha, 0x5057, "Unknown AEN:%04x %04x %04x %04x\n", @@ -3086,6 +3101,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) /* Enable MSI-X vectors for the base queue */ for (i = 0; i < 2; i++) { qentry = &ha->msix_entries[i]; + qentry->rsp = rsp; + rsp->msix = qentry; if (IS_P3P_TYPE(ha)) ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, @@ -3097,8 +3114,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) if (ret) goto msix_register_fail; qentry->have_irq = 1; - qentry->rsp = rsp; - rsp->msix = qentry; /* Register for CPU affinity notification. 
*/ irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); @@ -3119,12 +3134,12 @@ */ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { qentry = &ha->msix_entries[ATIO_VECTOR]; + qentry->rsp = rsp; + rsp->msix = qentry; ret = request_irq(qentry->vector, qla83xx_msix_entries[ATIO_VECTOR].handler, 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); qentry->have_irq = 1; - qentry->rsp = rsp; - rsp->msix = qentry; } msix_register_fail: diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 968b84613..23698c998 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -64,6 +64,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } + /* If a PCI error occurred, avoid mailbox processing. */ + if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { + ql_log(ql_log_warn, vha, 0x1191, + "PCI error, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + reg = ha->iobase; io_lock_on = base_vha->flags.init_done; @@ -266,6 +273,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) uint16_t mb0; uint32_t ictrl; + uint16_t w; if (IS_FWI2_CAPABLE(ha)) { mb0 = RD_REG_WORD(&reg->isp24.mailbox0); @@ -279,15 +287,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); - /* - * Attempt to capture a firmware dump for further analysis - * of the current firmware state. We do not need to do this - * if we are intentionally generating a dump. - */ - if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) - ha->isp_ops->fw_dump(vha, 0); + /* Capture the FW dump only if the PCI device is active */ + if (!pci_channel_offline(vha->hw->pdev)) { + pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); + if (w == 0xffff || ictrl == 0xffffffff) { + /* Special case: if the driver is unloading + * and the PCI device goes into a bad state + * due to a PCI error, only the PCI_ERR flag + * gets set, so do a premature exit here. + */ + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + set_bit(PCI_ERR, &base_vha->dpc_flags); + ha->flags.mbox_busy = 0; + rval = QLA_FUNCTION_TIMEOUT; + goto premature_exit; + } - rval = QLA_FUNCTION_TIMEOUT;
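The all-ones test above is the standard PCIe surprise-removal check: once a device drops off the bus, config-space and MMIO reads return all ones. A minimal self-contained sketch of the same idea; the helper name is hypothetical and not part of this patch:

static bool qla_pci_device_gone(struct pci_dev *pdev)
{
	u16 vendor;

	/* Config reads from a removed device return all ones. */
	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	return vendor == 0xffff;
}
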
+ /* Attempt to capture a firmware dump for further + * analysis of the current firmware state. We do not + * need to do this if we are intentionally generating + * a dump. + */ + if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) + ha->isp_ops->fw_dump(vha, 0); + rval = QLA_FUNCTION_TIMEOUT; + } } ha->flags.mbox_busy = 0; @@ -379,7 +404,7 @@ mbx_done: "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n", mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); - ql_dbg(ql_dbg_disc, vha, 0x1115, + ql_dbg(ql_dbg_mbx, vha, 0x1198, "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n", RD_REG_DWORD(&reg->isp24.host_status), ha->fw_dump_cap_flags, @@ -388,7 +413,7 @@ mbx_done: mbx_reg = &reg->isp24.mailbox0; for (i = 0; i < 6; i++) - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1199, "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++)); } else { ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); @@ -782,8 +807,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) if (IS_FWI2_CAPABLE(ha)) mcp->in_mb |= MBX_17|MBX_16|MBX_15; if (IS_QLA27XX(ha)) - mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 | - MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8; + mcp->in_mb |= + MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| + MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8; mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; @@ -842,6 +868,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ha->pep_version[2] = mcp->mb[14] & 0xff; ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; + ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22]; + ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24]; } failed: @@ -1844,7 +1872,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) states[0] = mcp->mb[1]; if (IS_FWI2_CAPABLE(vha->hw)) { states[1] = mcp->mb[2]; - states[2] = mcp->mb[3]; + states[2] = mcp->mb[3]; /* SFP info */ states[3] = mcp->mb[4]; states[4] = mcp->mb[5]; states[5] = mcp->mb[6]; /* DPORT status */ @@ -2759,15 +2787,16 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - uint32_t *iter, dwords; + uint32_t *iter = (void *)stats; + ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, "Entered %s.\n", __func__); mcp->mb[0] = MBC_GET_LINK_STATUS; - mcp->mb[2] = MSW(stats_dma); - mcp->mb[3] = LSW(stats_dma); + mcp->mb[2] = MSW(LSD(stats_dma)); + mcp->mb[3] = LSW(LSD(stats_dma)); mcp->mb[6] = MSW(MSD(stats_dma)); mcp->mb[7] = LSW(MSD(stats_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; @@ -2796,12 +2825,9 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); rval = QLA_FUNCTION_FAILED; } else {
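A note on the conversion below: le32_to_cpus() turns a little-endian 32-bit word into host byte order in place (a no-op on little-endian machines), so the statistics buffer can simply be walked word by word. A minimal sketch using only the stock kernel byteorder helpers; the function name is hypothetical:

static void le32_buf_to_cpu(u32 *buf, unsigned int dwords)
{
	/* Byte-swaps each word on big-endian hosts. */
	while (dwords--)
		le32_to_cpus(buf++);
}

- /* Copy over data -- firmware data is LE. */ + /* Re-endianize - firmware data is le32. 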
*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, "Done %s.\n", __func__); - dwords = offsetof(struct link_statistics, - link_up_cnt) / 4; - iter = &stats->link_fail_cnt; for ( ; dwords--; iter++) le32_to_cpus(iter); } @@ -2815,7 +2841,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, - dma_addr_t stats_dma) + dma_addr_t stats_dma, uint options) { int rval; mbx_cmd_t mc; @@ -2832,7 +2858,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, mcp->mb[7] = LSW(MSD(stats_dma)); mcp->mb[8] = sizeof(struct link_statistics) / 4; mcp->mb[9] = vha->vp_idx; - mcp->mb[10] = 0; + mcp->mb[10] = options; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; @@ -2847,7 +2873,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, "Done %s.\n", __func__); - /* Copy over data -- firmware data is LE. */ + /* Re-endianize - firmware data is le32. */ dwords = sizeof(struct link_statistics) / 4; iter = &stats->link_fail_cnt; for ( ; dwords--; iter++) @@ -5722,3 +5748,54 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, return rval; } + +int +qla26xx_dport_diagnostics(scsi_qla_host_t *vha, + void *dd_buf, uint size, uint options) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + dma_addr_t dd_dma; + + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192, + "Entered %s.\n", __func__); + + dd_dma = dma_map_single(&vha->hw->pdev->dev, + dd_buf, size, DMA_FROM_DEVICE); + if (!dd_dma) { + ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + memset(dd_buf, 0, size); + + mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; + mcp->mb[1] = options; + mcp->mb[2] = MSW(LSD(dd_dma)); + mcp->mb[3] = LSW(LSD(dd_dma)); + mcp->mb[6] = MSW(MSD(dd_dma)); + mcp->mb[7] = LSW(MSD(dd_dma)); + mcp->mb[8] = size; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->buf_size = size; + mcp->flags = MBX_DMA_IN; + mcp->tov = MBX_TOV_SECONDS * 4; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, + "Done %s.\n", __func__); + } + + dma_unmap_single(&vha->hw->pdev->dev, dd_dma, + size, DMA_FROM_DEVICE); + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 59c477883..6201dce35 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1183,7 +1183,6 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 #define qla82xx_get_temp_val(x) ((x) >> 16) -#define qla82xx_get_temp_val1(x) ((x) && 0x0000FFFF) #define qla82xx_get_temp_state(x) ((x) & 0xffff) #define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e007785db..a44c7c9bd 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -80,6 +80,7 @@ MODULE_PARM_DESC(ql2xallocfwdump, int ql2xextended_error_logging; module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); +module_param_named(logging, ql2xextended_error_logging, int, 
S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xextended_error_logging, "Option to enable extended error logging,\n" "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n" @@ -106,6 +107,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd, int ql2xfdmienable=1; module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR); +module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xfdmienable, "Enables FDMI registrations. " "0 - no FDMI. Default is 1 - perform FDMI."); @@ -157,6 +159,7 @@ MODULE_PARM_DESC(ql2xmultique_tag, int ql2xfwloadbin; module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); +module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xfwloadbin, "Option to specify location from which to load ISP firmware:\n" " 2 -- load firmware via the reject_firmware() (hotplug).\n" @@ -894,12 +897,16 @@ static void qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); while (((qla2x00_reset_active(vha)) || ha->dpc_active || ha->flags.mbox_busy) || test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || - test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) + test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + break; msleep(1000); + } } int @@ -936,6 +943,30 @@ sp_get(struct srb *sp) atomic_inc(&sp->ref_count); } +#define ISP_REG_DISCONNECT 0xffffffffU +/************************************************************************** +* qla2x00_isp_reg_stat +* +* Description: +* Read the host status register of the ISP before aborting a command. +* +* Input: +* ha = pointer to host adapter structure. +* +* Returns: +* true if the register reads back as all ones (register disconnect), +* false otherwise. +**************************************************************************/ +static inline +uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) +{ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT); +} + /************************************************************************** * qla2xxx_eh_abort * @@ -963,6 +994,11 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) int rval, wait = 0; struct qla_hw_data *ha = vha->hw; + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8042, + "PCI/Register disconnect, exiting.\n"); + return FAILED; + } if (!CMD_SP(cmd)) return SUCCESS; @@ -1146,6 +1182,12 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) scsi_qla_host_t *vha = shost_priv(cmd->device->host); struct qla_hw_data *ha = vha->hw; + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803e, + "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, ha->isp_ops->lun_reset); } @@ -1156,6 +1198,12 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) scsi_qla_host_t *vha = shost_priv(cmd->device->host); struct qla_hw_data *ha = vha->hw; + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, ha->isp_ops->target_reset); } @@ -1183,6 +1231,13 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) int ret = FAILED; unsigned int id; uint64_t lun; + struct qla_hw_data *ha = vha->hw; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8040, + "PCI/Register disconnect, exiting.\n"); + return FAILED; + } id = cmd->device->id; lun = cmd->device->lun; @@ -1252,6 +1307,13 @@ 
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) uint64_t lun; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x8041, + "PCI/Register disconnect, exiting.\n"); + schedule_work(&ha->board_disable); + return SUCCESS; + } + id = cmd->device->id; lun = cmd->device->lun; @@ -2103,27 +2165,27 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type = DT_EXTENDED_IDS; switch (ha->pdev->device) { case PCI_DEVICE_ID_QLOGIC_ISP2100: - ha->device_type |= DT_ISP2100; + ha->isp_type |= DT_ISP2100; ha->device_type &= ~DT_EXTENDED_IDS; ha->fw_srisc_address = RISC_START_ADDRESS_2100; break; case PCI_DEVICE_ID_QLOGIC_ISP2200: - ha->device_type |= DT_ISP2200; + ha->isp_type |= DT_ISP2200; ha->device_type &= ~DT_EXTENDED_IDS; ha->fw_srisc_address = RISC_START_ADDRESS_2100; break; case PCI_DEVICE_ID_QLOGIC_ISP2300: - ha->device_type |= DT_ISP2300; + ha->isp_type |= DT_ISP2300; ha->device_type |= DT_ZIO_SUPPORTED; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2312: - ha->device_type |= DT_ISP2312; + ha->isp_type |= DT_ISP2312; ha->device_type |= DT_ZIO_SUPPORTED; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2322: - ha->device_type |= DT_ISP2322; + ha->isp_type |= DT_ISP2322; ha->device_type |= DT_ZIO_SUPPORTED; if (ha->pdev->subsystem_vendor == 0x1028 && ha->pdev->subsystem_device == 0x0170) @@ -2131,60 +2193,60 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP6312: - ha->device_type |= DT_ISP6312; + ha->isp_type |= DT_ISP6312; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP6322: - ha->device_type |= DT_ISP6322; + ha->isp_type |= DT_ISP6322; ha->fw_srisc_address = RISC_START_ADDRESS_2300; break; case PCI_DEVICE_ID_QLOGIC_ISP2422: - ha->device_type |= DT_ISP2422; + ha->isp_type |= DT_ISP2422; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2432: - ha->device_type |= DT_ISP2432; + ha->isp_type |= DT_ISP2432; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8432: - ha->device_type |= DT_ISP8432; + ha->isp_type |= DT_ISP8432; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP5422: - ha->device_type |= DT_ISP5422; + ha->isp_type |= DT_ISP5422; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP5432: - ha->device_type |= DT_ISP5432; + ha->isp_type |= DT_ISP5432; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2532: - ha->device_type |= DT_ISP2532; + ha->isp_type |= DT_ISP2532; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8001: - ha->device_type |= DT_ISP8001; + ha->isp_type |= DT_ISP8001; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8021: - ha->device_type |= DT_ISP8021; + ha->isp_type 
|= DT_ISP8021; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; @@ -2192,7 +2254,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) qla82xx_init_flags(ha); break; case PCI_DEVICE_ID_QLOGIC_ISP8044: - ha->device_type |= DT_ISP8044; + ha->isp_type |= DT_ISP8044; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->fw_srisc_address = RISC_START_ADDRESS_2400; @@ -2200,7 +2262,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) qla82xx_init_flags(ha); break; case PCI_DEVICE_ID_QLOGIC_ISP2031: - ha->device_type |= DT_ISP2031; + ha->isp_type |= DT_ISP2031; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2208,7 +2270,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP8031: - ha->device_type |= DT_ISP8031; + ha->isp_type |= DT_ISP8031; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2216,10 +2278,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISPF001: - ha->device_type |= DT_ISPFX00; + ha->isp_type |= DT_ISPFX00; break; case PCI_DEVICE_ID_QLOGIC_ISP2071: - ha->device_type |= DT_ISP2071; + ha->isp_type |= DT_ISP2071; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2227,7 +2289,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2271: - ha->device_type |= DT_ISP2271; + ha->isp_type |= DT_ISP2271; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2235,7 +2297,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; case PCI_DEVICE_ID_QLOGIC_ISP2261: - ha->device_type |= DT_ISP2261; + ha->isp_type |= DT_ISP2261; ha->device_type |= DT_ZIO_SUPPORTED; ha->device_type |= DT_FWI2; ha->device_type |= DT_IIDMA; @@ -2901,6 +2963,10 @@ skip_dpc: qlt_add_target(ha, base_vha); clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); + + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return -ENODEV; + return 0; probe_init_failed: @@ -3128,6 +3194,12 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_wait_for_hba_ready(base_vha); + /* if UNLOAD flag is already set, then continue unload, + * where it was set first. + */ + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return; + set_bit(UNLOADING, &base_vha->dpc_flags); if (IS_QLAFX00(ha)) @@ -4907,6 +4979,12 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + /* if UNLOAD flag is already set, then continue unload, + * where it was set first. 
+ */ + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return; + ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); @@ -5002,6 +5080,9 @@ qla2x00_do_dpc(void *data) "DPC handler waking up, dpc_flags=0x%lx.\n", base_vha->dpc_flags); + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + break; + qla2x00_do_work(base_vha); if (IS_P3P_TYPE(ha)) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ca39deb4f..bff9689f5 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -215,8 +215,8 @@ static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha) spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); vha->hw->tgt.num_pend_cmds++; - if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds) - vha->hw->qla_stats.stat_max_pend_cmds = + if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds) + vha->qla_stats.stat_max_pend_cmds = vha->hw->tgt.num_pend_cmds; spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } @@ -5231,8 +5231,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { vha->hw->tgt.num_qfull_cmds_dropped++; if (vha->hw->tgt.num_qfull_cmds_dropped > - vha->hw->qla_stats.stat_max_qfull_cmds_dropped) - vha->hw->qla_stats.stat_max_qfull_cmds_dropped = + vha->qla_stats.stat_max_qfull_cmds_dropped) + vha->qla_stats.stat_max_qfull_cmds_dropped = vha->hw->tgt.num_qfull_cmds_dropped; ql_dbg(ql_dbg_io, vha, 0x3068, @@ -5263,8 +5263,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, vha->hw->tgt.num_qfull_cmds_dropped++; if (vha->hw->tgt.num_qfull_cmds_dropped > - vha->hw->qla_stats.stat_max_qfull_cmds_dropped) - vha->hw->qla_stats.stat_max_qfull_cmds_dropped = + vha->qla_stats.stat_max_qfull_cmds_dropped) + vha->qla_stats.stat_max_qfull_cmds_dropped = vha->hw->tgt.num_qfull_cmds_dropped; qlt_chk_exch_leak_thresh_hold(vha); @@ -5293,8 +5293,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, vha->hw->tgt.num_qfull_cmds_alloc++; if (vha->hw->tgt.num_qfull_cmds_alloc > - vha->hw->qla_stats.stat_max_qfull_cmds_alloc) - vha->hw->qla_stats.stat_max_qfull_cmds_alloc = + vha->qla_stats.stat_max_qfull_cmds_alloc) + vha->qla_stats.stat_max_qfull_cmds_alloc = vha->hw->tgt.num_qfull_cmds_alloc; } diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index c3e622524..36935c9ed 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -357,6 +357,13 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, ent->t262.start_addr = start; ent->t262.end_addr = end; } + } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) { + start = vha->hw->fw_ddr_ram_start; + end = vha->hw->fw_ddr_ram_end; + if (buf) { + ent->t262.start_addr = start; + ent->t262.end_addr = end; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd022, "%s: unknown area %x\n", __func__, ent->t262.ram_area); @@ -364,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, goto done; } - if (end < start || end == 0) { + if (end <= start || start == 0 || end == 0) { ql_dbg(ql_dbg_misc, vha, 0xd023, "%s: unusable range (start=%x end=%x)\n", __func__, ent->t262.end_addr, ent->t262.start_addr); diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 0bc93fa46..3cb1964b7 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.33-k" +#define QLA2XXX_VERSION "8.07.00.38-k" #define 
QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 7 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 0f9ba41e2..6a219a084 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -890,7 +890,7 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return 0; } -/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */ +/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, int arr_len) { @@ -909,7 +909,35 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, return 0; } -/* Returns number of bytes fetched into 'arr' or -1 if error. */ +/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else + * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple + * calls, not required to write in ascending offset order. Assumes resid + * set to scsi_bufflen() prior to any calls. + */ +static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, + int arr_len, unsigned int off_dst) +{ + int act_len, n; + struct scsi_data_buffer *sdb = scsi_in(scp); + off_t skip = off_dst; + + if (sdb->length <= off_dst) + return 0; + if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) + return DID_ERROR << 16; + + act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, + arr, arr_len, skip); + pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n", + __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid); + n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len); + sdb->resid = min(sdb->resid, n); + return 0; +} + +/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into + * 'arr' or -1 if error. + */ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, int arr_len) { @@ -3269,6 +3297,8 @@ static int resp_get_lba_status(struct scsi_cmnd *scp, return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN); } +#define RL_BUCKET_ELEMS 8 + /* Even though each pseudo target has a REPORT LUNS "well known logical unit" * (W-LUN), the normal Linux scanning logic does not associate it with a * device (e.g. /dev/sg7). 
The following magic will make that association: @@ -3285,12 +3315,14 @@ static int resp_report_luns(struct scsi_cmnd *scp, unsigned char select_report; u64 lun; struct scsi_lun *lun_p; - u8 *arr; + u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)]; unsigned int lun_cnt; /* normal LUN count (max: 256) */ unsigned int wlun_cnt; /* report luns W-LUN count */ unsigned int tlun_cnt; /* total LUN count */ unsigned int rlen; /* response length (in bytes) */ - int i, res; + int k, j, n, res; + unsigned int off_rsp = 0; + const int sz_lun = sizeof(struct scsi_lun); clear_luns_changed_on_target(devip); @@ -3329,33 +3361,40 @@ static int resp_report_luns(struct scsi_cmnd *scp, --lun_cnt; tlun_cnt = lun_cnt + wlun_cnt; - - rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8; - arr = vmalloc(rlen); - if (!arr) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, - INSUFF_RES_ASCQ); - return check_condition_result; - } - memset(arr, 0, rlen); + rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */ + scsi_set_resid(scp, scsi_bufflen(scp)); pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n", select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0); - /* luns start at byte 8 in response following the header */ - lun_p = (struct scsi_lun *)&arr[8]; - - /* LUNs use single level peripheral device addressing method */ + /* loops rely on sizeof response header same as sizeof lun (both 8) */ lun = sdebug_no_lun_0 ? 1 : 0; - for (i = 0; i < lun_cnt; i++) - int_to_scsilun(lun++, lun_p++); - - if (wlun_cnt) - int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++); - - put_unaligned_be32(rlen - 8, &arr[0]); - - res = fill_from_dev_buffer(scp, arr, rlen); - vfree(arr); + for (k = 0, j = 0, res = 0; true; ++k, j = 0) { + memset(arr, 0, sizeof(arr)); + lun_p = (struct scsi_lun *)&arr[0]; + if (k == 0) { + put_unaligned_be32(rlen, &arr[0]); + ++lun_p; + j = 1; + } + for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) { + if ((k * RL_BUCKET_ELEMS) + j > lun_cnt) + break; + int_to_scsilun(lun++, lun_p); + } + if (j < RL_BUCKET_ELEMS) + break; + n = j * sz_lun; + res = p_fill_from_dev_buffer(scp, arr, n, off_rsp); + if (res) + return res; + off_rsp += n; + } + if (wlun_cnt) { + int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p); + ++j; + } + if (j > 0) + res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp); return res; } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index eaccd651c..246456925 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -246,6 +246,10 @@ static struct { {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 57a4b9973..85c8a51bc 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -29,6 +29,7 @@ extern int scsi_init_hosts(void); extern void scsi_exit_hosts(void); /* scsi.c */ +extern bool scsi_use_blk_mq; extern int scsi_setup_command_freelist(struct Scsi_Host *shost); extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); #ifdef 
CONFIG_SCSI_LOGGING diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 3f0ff0721..60b651bfa 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -340,22 +340,6 @@ static int do_sas_phy_delete(struct device *dev, void *data) return 0; } -/** - * is_sas_attached - check if device is SAS attached - * @sdev: scsi device to check - * - * returns true if the device is SAS attached - */ -int is_sas_attached(struct scsi_device *sdev) -{ - struct Scsi_Host *shost = sdev->host; - - return shost->transportt->host_attrs.ac.class == - &sas_host_class.class; -} -EXPORT_SYMBOL(is_sas_attached); - - /** * sas_remove_children - tear down a devices SAS data structures * @dev: device belonging to the sas object diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 60bff78e9..d3e852ad5 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1012,7 +1012,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) } else if (rq_data_dir(rq) == READ) { SCpnt->cmnd[0] = READ_6; } else { - scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags); + scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n", + req_op(rq), (unsigned long long) rq->cmd_flags); goto out; } @@ -1137,21 +1138,26 @@ static int sd_init_command(struct scsi_cmnd *cmd) { struct request *rq = cmd->request; - if (rq->cmd_flags & REQ_DISCARD) + switch (req_op(rq)) { + case REQ_OP_DISCARD: return sd_setup_discard_cmnd(cmd); - else if (rq->cmd_flags & REQ_WRITE_SAME) + case REQ_OP_WRITE_SAME: return sd_setup_write_same_cmnd(cmd); - else if (rq->cmd_flags & REQ_FLUSH) + case REQ_OP_FLUSH: return sd_setup_flush_cmnd(cmd); - else + case REQ_OP_READ: + case REQ_OP_WRITE: return sd_setup_read_write_cmnd(cmd); + default: + BUG(); + } } static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = SCpnt->request; - if (rq->cmd_flags & REQ_DISCARD) + if (req_op(rq) == REQ_OP_DISCARD) __free_page(rq->completion_data); if (SCpnt->cmnd != rq->cmd) { @@ -1613,8 +1619,7 @@ static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, return -EOPNOTSUPP; return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 
0x06 : 0x00, old_key, new_key, 0, - (1 << 0) /* APTPL */ | - (1 << 2) /* ALL_TG_PT */); + (1 << 0) /* APTPL */); } static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, @@ -1774,7 +1779,7 @@ static int sd_done(struct scsi_cmnd *SCpnt) unsigned char op = SCpnt->cmnd[0]; unsigned char unmap = SCpnt->cmnd[1] & 8; - if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) { + if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) { if (!result) { good_bytes = blk_rq_bytes(req); scsi_set_resid(SCpnt, 0); @@ -2988,7 +2993,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_revalidate_disk(gd); - gd->driverfs_dev = &sdp->sdev_gendev; gd->flags = GENHD_FL_EXT_DEVT; if (sdp->removable) { gd->flags |= GENHD_FL_REMOVABLE; @@ -2996,7 +3000,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) } blk_pm_runtime_init(sdp->request_queue, dev); - add_disk(gd); + device_add_disk(dev, gd); if (sdkp->capacity) sd_dif_config_host(sdkp); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 53ef1cb64..8c9a35c91 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); - if (is_sas_attached(sdev)) + if (scsi_is_sas_rphy(&sdev->sdev_gendev)) efd.addr = sas_get_address(sdev); if (efd.addr) { @@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) if (!edev) return; + enclosure_unregister(edev); + ses_dev = edev->scratch; edev->scratch = NULL; @@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) kfree(edev->component[0].scratch); put_device(&edev->edev); - enclosure_unregister(edev); } static void ses_intf_remove(struct device *cdev, diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index b0fefd67c..b106596cc 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -113,11 +113,11 @@ snic_queue_report_tgt_req(struct snic *snic) pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(snic->pdev, pa)) { - kfree(buf); - snic_req_free(snic, rqi); SNIC_HOST_ERR(snic->shost, "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n", buf); + kfree(buf); + snic_req_free(snic, rqi); ret = -EINVAL; goto error; diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h index c5f9e1917..2a045a57e 100644 --- a/drivers/scsi/snic/snic_fwint.h +++ b/drivers/scsi/snic/snic_fwint.h @@ -92,7 +92,7 @@ enum snic_io_status { }; /* end of enum snic_io_status */ /* - * snic_io_hdr : host <--> firmare + * snic_io_hdr : host <--> firmware * * for any other message that will be queued to firmware should * have the following request header diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 64c867405..ed179348d 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -713,7 +713,6 @@ static int sr_probe(struct device *dev) get_capabilities(cd); sr_vendor_init(cd); - disk->driverfs_dev = &sdev->sdev_gendev; set_capacity(disk, cd->capacity); disk->private_data = &cd->driver; disk->queue = sdev->request_queue; @@ -730,7 +729,7 @@ static int sr_probe(struct device *dev) dev_set_drvdata(dev, cd); disk->flags |= GENHD_FL_REMOVABLE; - add_disk(disk); + device_add_disk(&sdev->sdev_gendev, disk); sdev_printk(KERN_DEBUG, sdev, "Attached scsi CD-ROM %s\n", cd->cdi.name); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 
3ddcabb79..8ccfc9ea8 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -966,6 +966,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request, if (scmnd->result) { if (scsi_normalize_sense(scmnd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr) && + !(sense_hdr.sense_key == NOT_READY && + sense_hdr.asc == 0x03A) && do_logging(STORVSC_LOGGING_ERROR)) scsi_print_sense_hdr(scmnd->device, "storvsc", &sense_hdr); diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 097894a1f..479669092 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -61,6 +61,14 @@ config SCSI_UFSHCD_PCI If unsure, say N. +config SCSI_UFS_DWC_TC_PCI + tristate "DesignWare pci support using a G210 Test Chip" + depends on SCSI_UFSHCD && PCI + ---help--- + Synopsys Test Chip is a PHY for prototyping purposes. + + If unsure, say N. + config SCSI_UFSHCD_PLATFORM tristate "Platform bus based UFS Controller support" depends on SCSI_UFSHCD @@ -72,6 +80,14 @@ config SCSI_UFSHCD_PLATFORM If unsure, say N. +config SCSI_UFS_DWC_TC_PLATFORM + tristate "DesignWare platform support using a G210 Test Chip" + depends on SCSI_UFSHCD_PLATFORM + ---help--- + Synopsys Test Chip is a PHY for prototyping purposes. + + If unsure, say N. + config SCSI_UFS_QCOM tristate "QCOM specific hooks to UFS controller platform driver" depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 8303bcce7..6e77cb0bf 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -1,4 +1,6 @@ # UFSHCD makefile +obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o +obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c new file mode 100644 index 000000000..c09a0fef0 --- /dev/null +++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c @@ -0,0 +1,181 @@ +/* + * Synopsys G210 Test Chip driver + * + * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) + * + * Authors: Joao Pinto + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
new file mode 100644
index 000000000..c09a0fef0
--- /dev/null
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -0,0 +1,181 @@
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ufshcd.h"
+#include "ufshcd-dwc.h"
+#include "tc-dwc-g210.h"
+
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+/* Test Chip type expected values */
+#define TC_G210_20BIT 20
+#define TC_G210_40BIT 40
+#define TC_G210_INV 0
+
+static int tc_type = TC_G210_INV;
+module_param(tc_type, int, 0);
+MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
+
+static int tc_dwc_g210_pci_suspend(struct device *dev)
+{
+	return ufshcd_system_suspend(dev_get_drvdata(dev));
+}
+
+static int tc_dwc_g210_pci_resume(struct device *dev)
+{
+	return ufshcd_system_resume(dev_get_drvdata(dev));
+}
+
+static int tc_dwc_g210_pci_runtime_suspend(struct device *dev)
+{
+	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
+}
+
+static int tc_dwc_g210_pci_runtime_resume(struct device *dev)
+{
+	return ufshcd_runtime_resume(dev_get_drvdata(dev));
+}
+
+static int tc_dwc_g210_pci_runtime_idle(struct device *dev)
+{
+	return ufshcd_runtime_idle(dev_get_drvdata(dev));
+}
+
+/**
+ * struct ufs_hba_dwc_vops - UFS DWC specific variant operations
+ */
+static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = {
+	.name = "tc-dwc-g210-pci",
+	.link_startup_notify = ufshcd_dwc_link_startup_notify,
+};
+
+/**
+ * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev)
+{
+	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * tc_dwc_g210_pci_remove - de-allocate PCI/SCSI host and host memory space
+ *		data structure memory
+ * @pdev - pointer to PCI handle
+ */
+static void tc_dwc_g210_pci_remove(struct pci_dev *pdev)
+{
+	struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	ufshcd_remove(hba);
+}
+
+/**
+ * tc_dwc_g210_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct ufs_hba *hba;
+	void __iomem *mmio_base;
+	int err;
+
+	/* Check Test Chip type and set the specific setup routine */
+	if (tc_type == TC_G210_20BIT) {
+		tc_dwc_g210_pci_hba_vops.phy_initialization =
+						tc_dwc_g210_config_20_bit;
+	} else if (tc_type == TC_G210_40BIT) {
+		tc_dwc_g210_pci_hba_vops.phy_initialization =
+						tc_dwc_g210_config_40_bit;
+	} else {
+		dev_err(&pdev->dev, "test chip version not specified\n");
+		return -EPERM;
+	}
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pcim_enable_device failed\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+
+	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
+	if (err < 0) {
+		dev_err(&pdev->dev, "request and iomap failed\n");
+		return err;
+	}
+
+	mmio_base = pcim_iomap_table(pdev)[0];
+
+	err = ufshcd_alloc_host(&pdev->dev, &hba);
+	if (err) {
+		dev_err(&pdev->dev, "Allocation failed\n");
+		return err;
+	}
+
+	INIT_LIST_HEAD(&hba->clk_list_head);
+
+	hba->vops = &tc_dwc_g210_pci_hba_vops;
+
+	err = ufshcd_init(hba, mmio_base, pdev->irq);
+	if (err) {
+		dev_err(&pdev->dev, "Initialization failed\n");
+		return err;
+	}
+
+	pci_set_drvdata(pdev, hba);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
+	.suspend = tc_dwc_g210_pci_suspend,
+	.resume = tc_dwc_g210_pci_resume,
+	.runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
+	.runtime_resume = tc_dwc_g210_pci_runtime_resume,
+	.runtime_idle = tc_dwc_g210_pci_runtime_idle,
+};
+
+static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
+	{ PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ }	/* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl);
+
+static struct pci_driver tc_dwc_g210_pci_driver = {
+	.name = "tc-dwc-g210-pci",
+	.id_table = tc_dwc_g210_pci_tbl,
+	.probe = tc_dwc_g210_pci_probe,
+	.remove = tc_dwc_g210_pci_remove,
+	.shutdown = tc_dwc_g210_pci_shutdown,
+	.driver = {
+		.pm = &tc_dwc_g210_pci_pm_ops
+	},
+};
+
+module_pci_driver(tc_dwc_g210_pci_driver);
+
+MODULE_AUTHOR("Joao Pinto");
+MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver");
+MODULE_LICENSE("Dual BSD/GPL");
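Note that tc_dwc_g210_pci_probe() above refuses to bind unless the tc_type module parameter selects a PHY width, failing with -EPERM before any PCI resources are claimed. So the module has to be loaded with the parameter set, for example (hypothetical invocation):

	modprobe tc-dwc-g210-pci tc_type=40
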
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
new file mode 100644
index 000000000..2d3f5270f
--- /dev/null
+++ b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
@@ -0,0 +1,113 @@
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+#include "ufshcd-pltfrm.h"
+#include "ufshcd-dwc.h"
+#include "tc-dwc-g210.h"
+
+/**
+ * UFS DWC specific variant operations
+ */
+static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = {
+	.name = "tc-dwc-g210-pltfm",
+	.link_startup_notify = ufshcd_dwc_link_startup_notify,
+	.phy_initialization = tc_dwc_g210_config_20_bit,
+};
+
+static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = {
+	.name = "tc-dwc-g210-pltfm",
+	.link_startup_notify = ufshcd_dwc_link_startup_notify,
+	.phy_initialization = tc_dwc_g210_config_40_bit,
+};
+
+static const struct of_device_id tc_dwc_g210_pltfm_match[] = {
+	{
+		.compatible = "snps,g210-tc-6.00-20bit",
+		.data = &tc_dwc_g210_20bit_pltfm_hba_vops,
+	},
+	{
+		.compatible = "snps,g210-tc-6.00-40bit",
+		.data = &tc_dwc_g210_40bit_pltfm_hba_vops,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match);
+
+/**
+ * tc_dwc_g210_pltfm_probe()
+ * @pdev: pointer to platform device structure
+ *
+ */
+static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
+{
+	int err;
+	const struct of_device_id *of_id;
+	struct ufs_hba_variant_ops *vops;
+	struct device *dev = &pdev->dev;
+
+	of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node);
+	vops = (struct ufs_hba_variant_ops *)of_id->data;
+
+	/* Perform generic probe */
+	err = ufshcd_pltfrm_init(pdev, vops);
+	if (err)
+		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+	return err;
+}
+
+/**
+ * tc_dwc_g210_pltfm_remove()
+ * @pdev: pointer to platform device structure
+ *
+ */
+static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
+{
+	struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+	pm_runtime_get_sync(&(pdev)->dev);
+	ufshcd_remove(hba);
+
+	return 0;
+}
+
+static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
+	.suspend = ufshcd_pltfrm_suspend,
+	.resume = ufshcd_pltfrm_resume,
+	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+	.runtime_resume = ufshcd_pltfrm_runtime_resume,
+	.runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver tc_dwc_g210_pltfm_driver = {
+	.probe = tc_dwc_g210_pltfm_probe,
+	.remove = tc_dwc_g210_pltfm_remove,
+	.shutdown = ufshcd_pltfrm_shutdown,
+	.driver = {
+		.name = "tc-dwc-g210-pltfm",
+		.pm = &tc_dwc_g210_pltfm_pm_ops,
+		.of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match),
+	},
+};
+
+module_platform_driver(tc_dwc_g210_pltfm_driver);
+
+MODULE_ALIAS("platform:tc-dwc-g210-pltfm");
+MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver");
+MODULE_AUTHOR("Joao Pinto");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
new file mode 100644
index 000000000..70db6d999
--- /dev/null
+++ b/drivers/scsi/ufs/tc-dwc-g210.c
@@ -0,0 +1,319 @@
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ufshcd.h"
+#include "unipro.h"
+
+#include "ufshcd-dwc.h"
+#include "ufshci-dwc.h"
+
+/**
+ * tc_dwc_g210_setup_40bit_rmmi()
+ * This function configures Synopsys TC specific attributes (40-bit RMMI)
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
+{
+	const struct ufshcd_dme_attr_val setup_attrs[] = {
+		{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
+		{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
+		{ UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
+		{ UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL },
+		{ UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
+		{ UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
+		{ UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+							DME_LOCAL },
+		{ UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
+		{ UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+							DME_LOCAL },
+		{ UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
+	};
+
+	return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+						ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi_lane0()
+ * This function configures Synopsys TC 20-bit RMMI Lane 0
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
+{
+	const struct ufshcd_dme_attr_val setup_attrs[] = {
+		{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+							DME_LOCAL },
+		{ UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
+		{ UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+							DME_LOCAL },
+		{ UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
+	};
+
+	return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+						ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi_lane1()
+ * This function configures Synopsys TC 20-bit RMMI Lane 1
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
+{
+	int connected_rx_lanes = 0;
+	int connected_tx_lanes = 0;
+	int ret = 0;
+
+	const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
+		{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+							DME_LOCAL },
+	};
+
+	const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
+		{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E,
+							DME_LOCAL },
+		{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f,
+							DME_LOCAL },
+	};
+
+	/* Get the available lane count */
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
+			&connected_rx_lanes);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
+			&connected_tx_lanes);
+
+	if (connected_tx_lanes == 2) {
+
+		ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs,
+						ARRAY_SIZE(setup_tx_attrs));
+
+		if (ret)
+			goto out;
+	}
+
+	if (connected_rx_lanes == 2) {
+		ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs,
+						ARRAY_SIZE(setup_rx_attrs));
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi()
+ * This function configures Synopsys TC specific attributes (20-bit RMMI)
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	const struct ufshcd_dme_attr_val setup_attrs[] = {
+		{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
+		{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
+		{ UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
+		{ UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL },
+		{ UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
+		{ UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
+		{ UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
+	};
+
+	ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+						ARRAY_SIZE(setup_attrs));
+	if (ret)
+		goto out;
+
+	/* Lane 0 configuration */
+	ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba);
+	if (ret)
+		goto out;
+
+	/* Lane 1 configuration */
+	ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba);
+	if (ret)
+		goto out;
+
+out:
+	return ret;
+}
+
+/**
+ * tc_dwc_g210_config_40_bit()
+ * This function configures Local (host) Synopsys 40-bit TC specific attributes
+ *
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n");
+	ret = tc_dwc_g210_setup_40bit_rmmi(hba);
+	if (ret) {
+		dev_err(hba->dev, "Configuration failed\n");
+		goto out;
+	}
+
+	/* To write Shadow register bank to effective configuration block */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+	if (ret)
+		goto out;
+
+	/* To configure Debug OMC */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
+
+/**
+ * tc_dwc_g210_config_20_bit()
+ * This function configures Local (host) Synopsys 20-bit TC specific attributes
+ *
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n");
+	ret = tc_dwc_g210_setup_20bit_rmmi(hba);
+	if (ret) {
+		dev_err(hba->dev, "Configuration failed\n");
+		goto out;
+	}
+
+	/* To write Shadow register bank to effective configuration block */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+	if (ret)
+		goto out;
+
+	/* To configure Debug OMC */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(tc_dwc_g210_config_20_bit);
+
+MODULE_AUTHOR("Joao Pinto");
+MODULE_DESCRIPTION("Synopsys G210 Test Chip driver");
+MODULE_LICENSE("Dual BSD/GPL");
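All of the RMMI setup routines in tc-dwc-g210.c above reduce to one idiom: a const table of { attribute, value, DME_LOCAL/DME_PEER } triples replayed through ufshcd_dwc_dme_set_attrs(). A new board variant could reuse the helper the same way; a minimal sketch with hypothetical attribute values (only the two table entries differ from the patch):

	static int my_tc_phy_init(struct ufs_hba *hba)	/* hypothetical */
	{
		static const struct ufshcd_dme_attr_val attrs[] = {
			{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
			{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
								DME_LOCAL },
		};

		/* replay the whole table; stops at the first DME error */
		return ufshcd_dwc_dme_set_attrs(hba, attrs, ARRAY_SIZE(attrs));
	}
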
diff --git a/drivers/scsi/ufs/tc-dwc-g210.h b/drivers/scsi/ufs/tc-dwc-g210.h
new file mode 100644
index 000000000..fb177db12
--- /dev/null
+++ b/drivers/scsi/ufs/tc-dwc-g210.h
@@ -0,0 +1,19 @@
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _TC_DWC_G210_H
+#define _TC_DWC_G210_H
+
+int tc_dwc_g210_config_40_bit(struct ufs_hba *hba);
+int tc_dwc_g210_config_20_bit(struct ufs_hba *hba);
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
new file mode 100644
index 000000000..5fd16c722
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -0,0 +1,154 @@
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ufshcd.h"
+#include "unipro.h"
+
+#include "ufshcd-dwc.h"
+#include "ufshci-dwc.h"
+
+int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
+				const struct ufshcd_dme_attr_val *v, int n)
+{
+	int ret = 0;
+	int attr_node = 0;
+
+	for (attr_node = 0; attr_node < n; attr_node++) {
+		ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel,
+			ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
+
+/**
+ * ufshcd_dwc_program_clk_div()
+ * This function programs the clk divider value. This value is needed to
+ * provide 1 microsecond tick to unipro layer.
+ * @hba: Private Structure pointer
+ * @divider_val: clock divider value to be programmed
+ *
+ */
+static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
+{
+	ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV);
+}
+
+/**
+ * ufshcd_dwc_link_is_up()
+ * Check if link is up
+ * @hba: private structure pointer
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
+{
+	int dme_result = 0;
+
+	ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result);
+
+	if (dme_result == UFSHCD_LINK_IS_UP) {
+		ufshcd_set_link_active(hba);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * ufshcd_dwc_connection_setup()
+ * This function configures both the local side (host) and the peer side
+ * (device) unipro attributes to establish the connection to application/
+ * cport.
+ * This function is not required if the hardware is properly configured to
+ * have this connection setup on reset. But invoking this function does no
+ * harm and should be fine even working with any ufs device.
+ *
+ * @hba: pointer to drivers private data
+ *
+ * Returns 0 on success non-zero value on failure
+ */
+static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
+{
+	const struct ufshcd_dme_attr_val setup_attrs[] = {
+		{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL },
+		{ UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL },
+		{ UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL },
+		{ UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL },
+		{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER },
+		{ UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER },
+		{ UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER },
+		{ UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER },
+		{ UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER },
+		{ UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER },
+		{ UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER },
+		{ UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER },
+		{ UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER }
+	};
+
+	return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * ufshcd_dwc_link_startup_notify()
+ * UFS Host DWC specific link startup sequence
+ * @hba: private structure pointer
+ * @status: Callback notify status
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
+					enum ufs_notify_change_status status)
+{
+	int err = 0;
+
+	if (status == PRE_CHANGE) {
+		ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125);
+
+		if (hba->vops->phy_initialization) {
+			err = hba->vops->phy_initialization(hba);
+			if (err) {
+				dev_err(hba->dev, "Phy setup failed (%d)\n",
+					err);
+				goto out;
+			}
+		}
+	} else { /* POST_CHANGE */
+		err = ufshcd_dwc_link_is_up(hba);
+		if (err) {
+			dev_err(hba->dev, "Link is not up\n");
+			goto out;
+		}
+
+		err = ufshcd_dwc_connection_setup(hba);
+		if (err)
+			dev_err(hba->dev, "Connection setup failed (%d)\n",
+				err);
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify);
+
+MODULE_AUTHOR("Joao Pinto");
+MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core");
+MODULE_LICENSE("Dual BSD/GPL");
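ufshcd_dwc_program_clk_div() above relies on the HCLKDIV convention documented in ufshci-dwc.h below: the register simply takes the bus clock frequency in MHz (125 MHz maps to 0x7d), which is what gives the UniPro layer its 1 microsecond tick. A sketch of deriving the value for an arbitrary clock (hypothetical helper, not in the patch):

	/* hypothetical: the HCLKDIV value is the hclk frequency in MHz */
	static void my_program_hclkdiv(struct ufs_hba *hba, unsigned long hclk_hz)
	{
		ufshcd_writel(hba, hclk_hz / 1000000, DWC_UFS_REG_HCLKDIV);
	}
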
diff --git a/drivers/scsi/ufs/ufshcd-dwc.h b/drivers/scsi/ufs/ufshcd-dwc.h
new file mode 100644
index 000000000..c8be295e0
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-dwc.h
@@ -0,0 +1,26 @@
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UFSHCD_DWC_H
+#define _UFSHCD_DWC_H
+
+struct ufshcd_dme_attr_val {
+	u32 attr_sel;
+	u32 mib_val;
+	u8 peer;
+};
+
+int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
+					enum ufs_notify_change_status status);
+int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
+				const struct ufshcd_dme_attr_val *v, int n);
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 718f12e09..db53f38da 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -372,6 +372,6 @@ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
 
 MODULE_AUTHOR("Santosh Yaragnavi");
 MODULE_AUTHOR("Vinayak Holikatti");
-MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
+MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f8fa72c31..f08d41a2d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1173,7 +1173,7 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
  * @cmd_dir: requests data direction
  */
 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
-		u32 *upiu_flags, enum dma_data_direction cmd_dir)
+			u32 *upiu_flags, enum dma_data_direction cmd_dir)
 {
 	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
 	u32 data_direction;
@@ -1299,47 +1299,55 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
 }
 
 /**
- * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
+ * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
+ *			     for Device Management Purposes
  * @hba - per adapter instance
  * @lrb - pointer to local reference block
  */
-static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
 	u32 upiu_flags;
 	int ret = 0;
 
-	switch (lrbp->command_type) {
-	case UTP_CMD_TYPE_SCSI:
-		if (likely(lrbp->cmd)) {
-			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
-					lrbp->cmd->sc_data_direction);
-			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
-		} else {
-			ret = -EINVAL;
-		}
-		break;
-	case UTP_CMD_TYPE_DEV_MANAGE:
-		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
-		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
-			ufshcd_prepare_utp_query_req_upiu(
-					hba, lrbp, upiu_flags);
-		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
-			ufshcd_prepare_utp_nop_upiu(lrbp);
-		else
-			ret = -EINVAL;
-		break;
-	case UTP_CMD_TYPE_UFS:
-		/* For UFS native command implementation */
-		ret = -ENOTSUPP;
-		dev_err(hba->dev, "%s: UFS native command are not supported\n",
-			__func__);
-		break;
-	default:
-		ret = -ENOTSUPP;
-		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
-				__func__, lrbp->command_type);
-		break;
-	} /* end of switch */
+	if (hba->ufs_version == UFSHCI_VERSION_20)
+		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+	else
+		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+
+	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
+	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+		ufshcd_prepare_utp_nop_upiu(lrbp);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+/**
+ * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
+ *			   for SCSI Purposes
+ * @hba - per adapter instance
+ * @lrb - pointer to local reference block
+ */
+static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	u32 upiu_flags;
+	int ret = 0;
+
+	if (hba->ufs_version == UFSHCI_VERSION_20)
+		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+	else
+		lrbp->command_type = UTP_CMD_TYPE_SCSI;
+
+	if (likely(lrbp->cmd)) {
+		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+						lrbp->cmd->sc_data_direction);
+		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+	} else {
+		ret = -EINVAL;
+	}
 
 	return ret;
 }
@@ -1451,10 +1459,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	lrbp->task_tag = tag;
 	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
 	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
-	lrbp->command_type = UTP_CMD_TYPE_SCSI;
 
-	/* form UPIU before issuing the command */
-	ufshcd_compose_upiu(hba, lrbp);
+	ufshcd_comp_scsi_upiu(hba, lrbp);
+
 	err = ufshcd_map_sg(lrbp);
 	if (err) {
 		lrbp->cmd = NULL;
@@ -1479,11 +1486,10 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
 	lrbp->sense_buffer = NULL;
 	lrbp->task_tag = tag;
 	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
-	lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
 	lrbp->intr_cmd = true; /* No interrupt aggregation */
 	hba->dev_cmd.type = cmd_type;
 
-	return ufshcd_compose_upiu(hba, lrbp);
+	return ufshcd_comp_devman_upiu(hba, lrbp);
 }
 
 static int
@@ -2131,7 +2137,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
 		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
 		if (!buff_ascii) {
 			err = -ENOMEM;
-			goto out_free_buff;
+			goto out;
 		}
 
 		/*
@@ -2150,7 +2156,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
 				size - QUERY_DESC_HDR_SIZE);
 		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
 		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
-out_free_buff:
 		kfree(buff_ascii);
 	}
 out:
@@ -3539,7 +3544,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
 			__ufshcd_release(hba);
-		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
 			if (hba->dev_cmd.complete)
 				complete(hba->dev_cmd.complete);
 		}
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 4bb65669f..430bef111 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -264,6 +264,7 @@ struct ufs_pwr_mode_info {
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
  * @dbg_register_dump: used to dump controller debug information
+ * @phy_initialization: used to initialize phys
  */
 struct ufs_hba_variant_ops {
 	const char *name;
@@ -285,6 +286,7 @@ struct ufs_hba_variant_ops {
 	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
 	void	(*dbg_register_dump)(struct ufs_hba *hba);
+	int	(*phy_initialization)(struct ufs_hba *);
 };
 
 /* clock gating state */
@@ -567,11 +569,16 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
 
 static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
 {
+/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/
+#ifndef CONFIG_SCSI_UFS_DWC
 	if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
 	    !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
 		return true;
 	else
 		return false;
+#else
+return true;
+#endif
 }
 
 #define ufshcd_writel(hba, val, reg)	\
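The ufshcd.c refactor above splits the old ufshcd_compose_upiu() switch into ufshcd_comp_devman_upiu() and ufshcd_comp_scsi_upiu(), and both now derive the transfer request descriptor command type from the controller version, since UFSHCI 2.0 collapses the separate SCSI and device-management types into a single storage type. The shared pattern, condensed into one expression (names exactly as in the hunks above):

	lrbp->command_type = (hba->ufs_version == UFSHCI_VERSION_20) ?
			UTP_CMD_TYPE_UFS_STORAGE : UTP_CMD_TYPE_SCSI;
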
diff --git a/drivers/scsi/ufs/ufshci-dwc.h b/drivers/scsi/ufs/ufshci-dwc.h
new file mode 100644
index 000000000..ca341fece
--- /dev/null
+++ b/drivers/scsi/ufs/ufshci-dwc.h
@@ -0,0 +1,36 @@
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UFSHCI_DWC_H
+#define _UFSHCI_DWC_H
+
+/* DWC HC UFSHCI specific Registers */
+enum dwc_specific_registers {
+	DWC_UFS_REG_HCLKDIV	= 0xFC,
+};
+
+/* Clock Divider Values: Hex equivalent of frequency in MHz */
+enum clk_div_values {
+	DWC_UFS_REG_HCLKDIV_DIV_62_5	= 0x3e,
+	DWC_UFS_REG_HCLKDIV_DIV_125	= 0x7d,
+	DWC_UFS_REG_HCLKDIV_DIV_200	= 0xc8,
+};
+
+/* Selector Index */
+enum selector_index {
+	SELIND_LN0_TX	= 0x00,
+	SELIND_LN1_TX	= 0x01,
+	SELIND_LN0_RX	= 0x04,
+	SELIND_LN1_RX	= 0x05,
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 4cb1cc63f..9599741ff 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -220,6 +220,12 @@ enum {
 #define UIC_ARG_ATTR_TYPE(t)		(((t) & 0xFF) << 16)
 #define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)
 
+/* Link Status*/
+enum link_status {
+	UFSHCD_LINK_IS_DOWN = 1,
+	UFSHCD_LINK_IS_UP = 2,
+};
+
 /* UIC Commands */
 enum uic_cmd_dme {
 	UIC_CMD_DME_GET		= 0x01,
@@ -279,6 +285,11 @@ enum {
 	UTP_CMD_TYPE_DEV_MANAGE	= 0x2,
 };
 
+/* To accommodate UFS2.0 required Command type */
+enum {
+	UTP_CMD_TYPE_UFS_STORAGE = 0x1,
+};
+
 enum {
 	UTP_SCSI_COMMAND		= 0x00000000,
 	UTP_NATIVE_UFS_COMMAND		= 0x10000000,
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index e2854e45f..eff8b5675 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -36,6 +36,10 @@
 #define TX_LCC_SEQUENCER			0x0032
 #define TX_MIN_ACTIVATETIME			0x0033
 #define TX_PWM_G6_G7_SYNC_LENGTH		0x0034
+#define TX_REFCLKFREQ				0x00EB
+#define TX_CFGCLKFREQVAL			0x00EC
+#define CFGEXTRATTR				0x00F0
+#define DITHERCTRL2				0x00F1
 
 /*
  * M-RX Configuration Attributes
@@ -51,10 +55,40 @@
 #define RX_TERMINATION_FORCE_ENABLE		0x0089
 #define RX_MIN_ACTIVATETIME_CAPABILITY		0x008F
 #define RX_HIBERN8TIME_CAPABILITY		0x0092
+#define RX_REFCLKFREQ				0x00EB
+#define RX_CFGCLKFREQVAL			0x00EC
+#define CFGWIDEINLN				0x00F0
+#define CFGRXCDR8				0x00BA
+#define ENARXDIRECTCFG4				0x00F2
+#define CFGRXOVR8				0x00BD
+#define RXDIRECTCTRL2				0x00C7
+#define ENARXDIRECTCFG3				0x00F3
+#define RXCALCTRL				0x00B4
+#define ENARXDIRECTCFG2				0x00F4
+#define CFGRXOVR4				0x00E9
+#define RXSQCTRL				0x00B5
+#define CFGRXOVR6				0x00BF
 
 #define is_mphy_tx_attr(attr)			(attr < RX_MODE)
 #define RX_MIN_ACTIVATETIME_UNIT_US		100
 #define HIBERN8TIME_UNIT_US			100
+
+/*
+ * Common Block Attributes
+ */
+#define TX_GLOBALHIBERNATE			UNIPRO_CB_OFFSET(0x002B)
+#define REFCLKMODE				UNIPRO_CB_OFFSET(0x00BF)
+#define DIRECTCTRL19				UNIPRO_CB_OFFSET(0x00CD)
+#define DIRECTCTRL10				UNIPRO_CB_OFFSET(0x00E6)
+#define CDIRECTCTRL6				UNIPRO_CB_OFFSET(0x00EA)
+#define RTOBSERVESELECT				UNIPRO_CB_OFFSET(0x00F0)
+#define CBDIVFACTOR				UNIPRO_CB_OFFSET(0x00F1)
+#define CBDCOCTRL5				UNIPRO_CB_OFFSET(0x00F3)
+#define CBPRGPLL2				UNIPRO_CB_OFFSET(0x00F8)
+#define CBPRGTUNING				UNIPRO_CB_OFFSET(0x00FB)
+
+#define UNIPRO_CB_OFFSET(x)			(0x8000 | x)
+
 /*
  * PHY Adpater attributes
  */
@@ -119,6 +153,11 @@
 #define PA_TACTIVATE_TIME_UNIT_US	10
 #define PA_HIBERN8_TIME_UNIT_US		100
 
+/*Other attributes*/
+#define VS_MPHYCFGUPDT		0xD085
+#define VS_DEBUGOMC		0xD09E
+#define VS_POWERSTATE		0xD083
+
 /* PHY Adapter Protocol Constants */
 #define PA_MAXDATALANES	4
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 6164634af..4a0d3cdc6 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -17,7 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * Maintained by: Arvind Kumar
+ * Maintained by: Jim Gill
  *
  */
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 12712c92f..c097d2ccb 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -17,7 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * Maintained by: Arvind Kumar
+ * Maintained by: Jim Gill
 *
 */
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 0c0f17b9a..409f95984 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -192,7 +192,7 @@
 #ifdef WD7000_DEBUG
 #define dprintk printk
 #else
-#define dprintk(format,args...)
+#define dprintk no_printk
 #endif
 
 /*
@@ -1591,8 +1591,8 @@ static int wd7000_biosparam(struct scsi_device *sdev,
 {
 	char b[BDEVNAME_SIZE];
 
-	dprintk("wd7000_biosparam: dev=%s, size=%d, ",
-		bdevname(bdev, b), capacity);
+	dprintk("wd7000_biosparam: dev=%s, size=%llu, ",
+		bdevname(bdev, b), (u64)capacity);
 	(void)b;	/* unused var warning? */
 
 	/*
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index eb51e612c..04573a56c 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev)
 	scsi_host_put(sh);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+static const struct pci_device_id wd719x_pci_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
 	{}
 };
-- 
cgit v1.2.3-54-g00ecf
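Taken together, the DWC additions in this patch define the glue pattern for a UFS variant driver: implement the new phy_initialization hook, reuse ufshcd_dwc_link_startup_notify() for the PRE_CHANGE/POST_CHANGE link bring-up, and hand the ops table to the core. A minimal skeleton under those assumptions (all "my_" names are hypothetical):

	static int my_phy_init(struct ufs_hba *hba)
	{
		/* program vendor PHY attributes here, e.g. through
		 * ufshcd_dwc_dme_set_attrs() */
		return 0;
	}

	static struct ufs_hba_variant_ops my_vops = {
		.name			= "my-variant",
		.link_startup_notify	= ufshcd_dwc_link_startup_notify,
		.phy_initialization	= my_phy_init,
	};

For the platform flavour the ops table is selected by compatible string ("snps,g210-tc-6.00-20bit" or "snps,g210-tc-6.00-40bit" in the of_device_id table above), while the PCI flavour selects it at load time via the tc_type parameter.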