From d635711daa98be86d4c7fd01499c34f566b54ccb Mon Sep 17 00:00:00 2001 From: André Fabian Silva Delgado Date: Fri, 10 Jun 2016 05:30:17 -0300 Subject: Linux-libre 4.6.2-gnu --- drivers/scsi/ufs/ufshcd.c | 1211 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 1038 insertions(+), 173 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 85cd2564c..f8fa72c31 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3,7 +3,7 @@ * * This code is based on drivers/scsi/ufs/ufshcd.c * Copyright (C) 2011-2013 Samsung India Software Operations - * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. * * Authors: * Santosh Yaraganavi @@ -39,8 +39,10 @@ #include <linux/async.h> #include <linux/devfreq.h> - +#include <linux/nls.h> +#include <linux/of.h> #include "ufshcd.h" +#include "ufs_quirks.h" #include "unipro.h" #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ @@ -58,13 +60,25 @@ #define QUERY_REQ_RETRIES 10 /* Query request timeout */ #define QUERY_REQ_TIMEOUT 30 /* msec */ +/* + * Query request timeout for fDeviceInit flag + * fDeviceInit query response time for some devices is so large that the + * default QUERY_REQ_TIMEOUT may not be enough for such devices. + */ +#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */ /* Task management command timeout */ #define TM_CMD_TIMEOUT 100 /* msecs */ +/* maximum number of retries for a general UIC command */ +#define UFS_UIC_COMMAND_RETRIES 3 + /* maximum number of link-startup retries */ #define DME_LINKSTARTUP_RETRIES 3 +/* Maximum retries for Hibern8 enter */ +#define UIC_HIBERN8_ENTER_RETRIES 3 + /* maximum number of reset retries before giving up */ #define MAX_HOST_RESET_RETRIES 5 @@ -92,7 +106,7 @@ static u32 ufs_query_desc_max_size[] = { QUERY_DESC_INTERCONNECT_MAX_SIZE, QUERY_DESC_STRING_MAX_SIZE, QUERY_DESC_RFU_MAX_SIZE, - QUERY_DESC_GEOMETRY_MAZ_SIZE, + QUERY_DESC_GEOMETRY_MAX_SIZE, QUERY_DESC_POWER_MAX_SIZE, QUERY_DESC_RFU_MAX_SIZE, }; @@ -119,9 +133,11 @@ enum { /* UFSHCD UIC layer error flags */ enum { UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ - UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ - UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ - UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */ + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */ + UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */ + UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */ + UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */ + UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */ }; /* Interrupt configuration options */ @@ -181,6 +197,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba); static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused); static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); @@ -190,6 +207,10 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *desired_pwr_mode); static int ufshcd_change_power_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *pwr_mode); +static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) +{ + return tag >= 0 && tag < hba->nutrs; +} static inline int
ufshcd_enable_irq(struct ufs_hba *hba) { @@ -215,6 +236,16 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba) } } +/* replace non-printable or non-ASCII characters with spaces */ +static inline void ufshcd_remove_non_printable(char *val) +{ + if (!val) + return; + + if (*val < 0x20 || *val > 0x7e) + *val = ' '; +} + /* * ufshcd_wait_for_register - wait for register value to change * @hba - per-adapter interface @@ -223,11 +254,13 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba) * @val - wait condition * @interval_us - polling interval in microsecs * @timeout_ms - timeout in millisecs + * @can_sleep - perform sleep or just spin * * Returns -ETIMEDOUT on error, zero on success */ -static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long interval_us, unsigned long timeout_ms) +int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long interval_us, + unsigned long timeout_ms, bool can_sleep) { int err = 0; unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); @@ -236,9 +269,10 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, val = val & mask; while ((ufshcd_readl(hba, reg) & mask) != val) { - /* wakeup within 50us of expiry */ - usleep_range(interval_us, interval_us + 50); - + if (can_sleep) + usleep_range(interval_us, interval_us + 50); + else + udelay(interval_us); if (time_after(jiffies, timeout)) { if ((ufshcd_readl(hba, reg) & mask) != val) err = -ETIMEDOUT; @@ -359,6 +393,16 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); } +/** + * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field + * @hba: per adapter instance + * @tag: position of the bit to be cleared + */ +static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag) +{ + __clear_bit(tag, &hba->outstanding_reqs); +} + /** * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY * @reg: Register value of host controller status @@ -374,11 +418,9 @@ static inline int ufshcd_get_lists_status(u32 reg) * 1 UTRLRDY * 2 UTMRLRDY * 3 UCRDY - * 4 HEI - * 5 DEI - * 6-7 reserved + * 4-7 reserved */ - return (((reg) & (0xFF)) >> 1) ^ (0x07); + return ((reg & 0xFF) >> 1) ^ 0x07; } /** @@ -528,6 +570,34 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba) return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; } +u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) +{ + /* HCI version 1.0 and 1.1 supports UniPro 1.41 */ + if ((hba->ufs_version == UFSHCI_VERSION_10) || + (hba->ufs_version == UFSHCI_VERSION_11)) + return UFS_UNIPRO_VER_1_41; + else + return UFS_UNIPRO_VER_1_6; +} +EXPORT_SYMBOL(ufshcd_get_local_unipro_ver); + +static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) +{ + /* + * If both host and device support UniPro ver1.6 or later, PA layer + * parameters tuning happens during link startup itself. + * + * We can manually tune PA layer parameters if either host or device + * doesn't support UniPro ver 1.6 or later. But to keep manual tuning + * logic simple, we will only do manual tuning if local unipro version + * doesn't support ver1.6 or later. 
+ */ + if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6) + return true; + else + return false; +} + static void ufshcd_ungate_work(struct work_struct *work) { int ret; @@ -582,6 +652,11 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_gating.active_reqs++; + if (ufshcd_eh_in_progress(hba)) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + return 0; + } + start: switch (hba->clk_gating.state) { case CLKS_ON: @@ -697,7 +772,8 @@ static void __ufshcd_release(struct ufs_hba *hba) if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || hba->lrb_in_use || hba->outstanding_tasks - || hba->active_uic_cmd || hba->uic_async_done) + || hba->active_uic_cmd || hba->uic_async_done + || ufshcd_eh_in_progress(hba)) return; hba->clk_gating.state = REQ_CLKS_OFF; @@ -953,13 +1029,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result * @hba: per adapter instance * @uic_cmd: UIC command + * @completion: initialize the completion only if this is set to true * * Identical to ufshcd_send_uic_cmd() except for the mutex. Must be called * with mutex held and host_lock locked. * Returns 0 only if success. */ static int -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, + bool completion) { if (!ufshcd_ready_for_uic_cmd(hba)) { dev_err(hba->dev, @@ -967,7 +1045,8 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) return -EIO; } - init_completion(&uic_cmd->done); + if (completion) + init_completion(&uic_cmd->done); ufshcd_dispatch_uic_cmd(hba, uic_cmd); @@ -992,7 +1071,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) ufshcd_add_delay_before_dme_cmd(hba); spin_lock_irqsave(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, uic_cmd); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); spin_unlock_irqrestore(hba->host->host_lock, flags); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -1035,6 +1114,7 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) cpu_to_le32(lower_32_bits(sg->dma_address)); prd_table[i].upper_addr = cpu_to_le32(upper_32_bits(sg->dma_address)); + prd_table[i].reserved = 0; } } else { lrbp->utr_descriptor_ptr->prd_table_length = 0; @@ -1117,7 +1197,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, /* Transfer request descriptor header fields */ req_desc->header.dword_0 = cpu_to_le32(dword_0); - + /* dword_1 is reserved, hence it is set to 0 */ + req_desc->header.dword_1 = 0; /* * assigning invalid value for command status.
Controller * updates OCS on command completion, with the command @@ -1125,6 +1206,10 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, */ req_desc->header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); + /* dword_3 is reserved, hence it is set to 0 */ + req_desc->header.dword_3 = 0; + + req_desc->prd_table_length = 0; } /** @@ -1137,6 +1222,7 @@ static void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) { struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; + unsigned short cdb_len; /* command descriptor fields */ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( @@ -1151,8 +1237,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(lrbp->cmd->sdb.length); - memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, - (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE))); + cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE); + memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE); + memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len); + + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } /** @@ -1189,6 +1278,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) memcpy(descp, query->descriptor, len); + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) @@ -1201,6 +1291,11 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); + /* clear rest of the fields of basic header */ + ucd_req_ptr->header.dword_1 = 0; + ucd_req_ptr->header.dword_2 = 0; + + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } /** @@ -1293,6 +1388,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) hba = shost_priv(host); tag = cmd->request->tag; + if (!ufshcd_valid_tag(hba, tag)) { + dev_err(hba->dev, + "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", + __func__, tag, cmd, cmd->request); + BUG(); + } spin_lock_irqsave(hba->host->host_lock, flags); switch (hba->ufshcd_state) { @@ -1312,6 +1413,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) cmd->scsi_done(cmd); goto out_unlock; } + + /* if error handling is in progress, don't issue commands */ + if (ufshcd_eh_in_progress(hba)) { + set_host_byte(cmd, DID_ERROR); + cmd->scsi_done(cmd); + goto out_unlock; + } spin_unlock_irqrestore(hba->host->host_lock, flags); /* acquire the tag to make sure device cmds don't use it */ @@ -1396,7 +1504,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag) */ err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, - mask, ~mask, 1000, 1000); + mask, ~mask, 1000, 1000, true); return err; } @@ -1475,9 +1583,17 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, if (!time_left) { err = -ETIMEDOUT; + dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", + __func__, lrbp->task_tag); if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) - /* sucessfully cleared the command, retry if needed */ + /* successfully cleared the command, retry if needed */ err = -EAGAIN; + /* + * in case of an error, after clearing the doorbell, + * we also need to clear the outstanding_request + * field in hba + */ + ufshcd_outstanding_req_clear(hba, lrbp->task_tag); } return err; @@ -1555,6 +1671,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba 
*hba, hba->dev_cmd.complete = &wait; + /* Make sure descriptors are ready before ringing the doorbell */ + wmb(); spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_send_command(hba, tag); spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -1591,6 +1709,29 @@ static inline void ufshcd_init_query(struct ufs_hba *hba, (*request)->upiu_req.selector = selector; } +static int ufshcd_query_flag_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum flag_idn idn, bool *flag_res) +{ + int ret; + int retries; + + for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { + ret = ufshcd_query_flag(hba, opcode, idn, flag_res); + if (ret) + dev_dbg(hba->dev, + "%s: failed with error %d, retries %d\n", + __func__, ret, retries); + else + break; + } + + if (ret) + dev_err(hba->dev, + "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", + __func__, opcode, idn, ret, retries); + return ret; +} + /** * ufshcd_query_flag() - API function for sending flag query requests * hba: per-adapter instance @@ -1600,12 +1741,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba, * * Returns 0 for success, non-zero in case of failure */ -static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, +int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res) { struct ufs_query_req *request = NULL; struct ufs_query_res *response = NULL; int err, index = 0, selector = 0; + int timeout = QUERY_REQ_TIMEOUT; BUG_ON(!hba); @@ -1638,7 +1780,10 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, goto out_unlock; } - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + if (idn == QUERY_FLAG_IDN_FDEVICEINIT) + timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT; + + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); if (err) { dev_err(hba->dev, @@ -1722,20 +1867,43 @@ out: } /** - * ufshcd_query_descriptor - API function for sending descriptor requests - * hba: per-adapter instance - * opcode: attribute opcode - * idn: attribute idn to access - * index: index field - * selector: selector field - * desc_buf: the buffer that contains the descriptor - * buf_len: length parameter passed to the device + * ufshcd_query_attr_retry() - API function for sending query + * attribute with retries + * @hba: per-adapter instance + * @opcode: attribute opcode + * @idn: attribute idn to access + * @index: index field + * @selector: selector field + * @attr_val: the attribute value after the query request + * completes * - * Returns 0 for success, non-zero in case of failure. - * The buf_len parameter will contain, on return, the length parameter - * received on the response.
- */ -static int ufshcd_query_descriptor(struct ufs_hba *hba, + * Returns 0 for success, non-zero in case of failure +*/ +static int ufshcd_query_attr_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, + u32 *attr_val) +{ + int ret = 0; + u32 retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + ret = ufshcd_query_attr(hba, opcode, idn, index, + selector, attr_val); + if (ret) + dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", + __func__, ret, retries); + else + break; + } + + if (ret) + dev_err(hba->dev, + "%s: query attribute, idn %d, failed with error %d after %d retries\n", + __func__, idn, ret, QUERY_REQ_RETRIES); + return ret; +} + +static int __ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode, enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len) { @@ -1799,6 +1967,39 @@ out: return err; } +/** + * ufshcd_query_descriptor_retry - API function for sending descriptor + * requests + * hba: per-adapter instance + * opcode: attribute opcode + * idn: attribute idn to access + * index: index field + * selector: selector field + * desc_buf: the buffer that contains the descriptor + * buf_len: length parameter passed to the device + * + * Returns 0 for success, non-zero in case of failure. + * The buf_len parameter will contain, on return, the length parameter + * received on the response. + */ +int ufshcd_query_descriptor_retry(struct ufs_hba *hba, + enum query_opcode opcode, enum desc_idn idn, u8 index, + u8 selector, u8 *desc_buf, int *buf_len) +{ + int err; + int retries; + + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + err = __ufshcd_query_descriptor(hba, opcode, idn, index, + selector, desc_buf, buf_len); + if (!err || err == -EINVAL) + break; + } + + return err; +} +EXPORT_SYMBOL(ufshcd_query_descriptor_retry); + /** * ufshcd_read_desc_param - read the specified descriptor parameter * @hba: Pointer to adapter instance @@ -1841,9 +2042,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, return -ENOMEM; } - ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, desc_buf, - &buff_len); + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, desc_buf, - &buff_len); hmm + desc_id, desc_index, 0, desc_buf, + &buff_len); if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) || (desc_buf[QUERY_DESC_LENGTH_OFFSET] != @@ -1881,6 +2082,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba, return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); } +int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) +{ + return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); +} +EXPORT_SYMBOL(ufshcd_read_device_desc); + +/** + * ufshcd_read_string_desc - read string descriptor + * @hba: pointer to adapter instance + * @desc_index: descriptor index + * @buf: pointer to buffer where descriptor would be read + * @size: size of buf + * @ascii: if true convert from unicode to ascii characters + * + * Return 0 in case of success, non-zero otherwise + */ +int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf, + u32 size, bool ascii) +{ + int err = 0; + + err = ufshcd_read_desc(hba, + QUERY_DESC_IDN_STRING, desc_index, buf, size); + + if (err) { + dev_err(hba->dev, "%s: reading String Desc failed after %d retries.
err = %d\n", + __func__, QUERY_REQ_RETRIES, err); + goto out; + } + + if (ascii) { + int desc_len; + int ascii_len; + int i; + char *buff_ascii; + + desc_len = buf[0]; + /* remove header and divide by 2 to move from UTF16 to UTF8 */ + ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; + if (size < ascii_len + QUERY_DESC_HDR_SIZE) { + dev_err(hba->dev, "%s: buffer allocated size is too small\n", + __func__); + err = -ENOMEM; + goto out; + } + + buff_ascii = kmalloc(ascii_len, GFP_KERNEL); + if (!buff_ascii) { + err = -ENOMEM; + goto out_free_buff; + } + + /* + * the descriptor contains string in UTF16 format + * we need to convert to utf-8 so it can be displayed + */ + utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], + desc_len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, buff_ascii, ascii_len); + + /* replace non-printable or non-ASCII characters with spaces */ + for (i = 0; i < ascii_len; i++) + ufshcd_remove_non_printable(&buff_ascii[i]); + + memset(buf + QUERY_DESC_HDR_SIZE, 0, + size - QUERY_DESC_HDR_SIZE); + memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); + buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; +out_free_buff: + kfree(buff_ascii); + } +out: + return err; +} +EXPORT_SYMBOL(ufshcd_read_string_desc); + /** * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter * @hba: Pointer to adapter instance @@ -2128,6 +2405,7 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, }; const char *set = action[!!peer]; int ret; + int retries = UFS_UIC_COMMAND_RETRIES; uic_cmd.command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; @@ -2135,10 +2413,18 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); uic_cmd.argument3 = mib_val; - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); - if (ret) - dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", - set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); + do { + /* for peer attributes we retry upon failure */ + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", + set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); + } while (ret && peer && --retries); + + if (!retries) + dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", + set, UIC_GET_ATTR_ID(attr_sel), mib_val, + retries); return ret; } @@ -2163,6 +2449,7 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, }; const char *get = action[!!peer]; int ret; + int retries = UFS_UIC_COMMAND_RETRIES; struct ufs_pa_layer_attr orig_pwr_info; struct ufs_pa_layer_attr temp_pwr_info; bool pwr_mode_change = false; @@ -2193,14 +2480,19 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; uic_cmd.argument1 = attr_sel; - ret = ufshcd_send_uic_cmd(hba, &uic_cmd); - if (ret) { - dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n", - get, UIC_GET_ATTR_ID(attr_sel), ret); - goto out; - } + do { + /* for peer attributes we retry upon failure */ + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", + get, UIC_GET_ATTR_ID(attr_sel), ret); + } while (ret && peer && --retries); - if (mib_val) + if (!retries) + dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", + get, UIC_GET_ATTR_ID(attr_sel), retries); + + if (mib_val && !ret) *mib_val = uic_cmd.argument3; if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) @@ -2233,6 +2525,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct 
uic_command *cmd) unsigned long flags; u8 status; int ret; + bool reenable_intr = false; mutex_lock(&hba->uic_cmd_mutex); init_completion(&uic_async_done); @@ -2240,15 +2533,17 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) spin_lock_irqsave(hba->host->host_lock, flags); hba->uic_async_done = &uic_async_done; - ret = __ufshcd_send_uic_cmd(hba, cmd); - spin_unlock_irqrestore(hba->host->host_lock, flags); - if (ret) { - dev_err(hba->dev, - "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", - cmd->command, cmd->argument3, ret); - goto out; + if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { + ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); + /* + * Make sure UIC command completion interrupt is disabled before + * issuing UIC command. + */ + wmb(); + reenable_intr = true; } - ret = ufshcd_wait_for_uic_cmd(hba, cmd); + ret = __ufshcd_send_uic_cmd(hba, cmd, false); + spin_unlock_irqrestore(hba->host->host_lock, flags); if (ret) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", @@ -2274,7 +2569,10 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) } out: spin_lock_irqsave(hba->host->host_lock, flags); + hba->active_uic_cmd = NULL; hba->uic_async_done = NULL; + if (reenable_intr) + ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); spin_unlock_irqrestore(hba->host->host_lock, flags); mutex_unlock(&hba->uic_cmd_mutex); @@ -2315,13 +2613,65 @@ out: return ret; } -static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) +static int ufshcd_link_recovery(struct ufs_hba *hba) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->ufshcd_state = UFSHCD_STATE_RESET; + ufshcd_set_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + ret = ufshcd_host_reset_and_restore(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret) + hba->ufshcd_state = UFSHCD_STATE_ERROR; + ufshcd_clear_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (ret) + dev_err(hba->dev, "%s: link recovery failed, err %d", + __func__, ret); + + return ret; +} + +static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) { + int ret; struct uic_command uic_cmd = {0}; uic_cmd.command = UIC_CMD_DME_HIBER_ENTER; + ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + + if (ret) { + dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", + __func__, ret); + + /* + * If link recovery fails then return error so that the caller + * doesn't retry the hibern8 enter again. + */ + if (ufshcd_link_recovery(hba)) + ret = -ENOLINK; + } + + return ret; +} + +static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) +{ + int ret = 0, retries; - return ufshcd_uic_pwr_ctrl(hba, &uic_cmd); + for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) { + ret = __ufshcd_uic_hibern8_enter(hba); + if (!ret || ret == -ENOLINK) + goto out; + } +out: + return ret; } static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) @@ -2332,8 +2682,9 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) uic_cmd.command = UIC_CMD_DME_HIBER_EXIT; ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); if (ret) { - ufshcd_set_link_off(hba); - ret = ufshcd_host_reset_and_restore(hba); + dev_err(hba->dev, "%s: hibern8 exit failed.
ret = %d\n", + __func__, ret); + ret = ufshcd_link_recovery(hba); } return ret; @@ -2513,17 +2864,12 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba, */ static int ufshcd_complete_dev_init(struct ufs_hba *hba) { - int i, retries, err = 0; + int i; + int err; bool flag_res = 1; - for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { - /* Set the fDeviceInit flag */ - err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, - QUERY_FLAG_IDN_FDEVICEINIT, NULL); - if (!err || err == -ETIMEDOUT) - break; - dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); - } + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, NULL); if (err) { dev_err(hba->dev, "%s setting fDeviceInit flag failed with error %d\n", @@ -2531,18 +2877,11 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba) goto out; } - /* poll for max. 100 iterations for fDeviceInit flag to clear */ - for (i = 0; i < 100 && !err && flag_res; i++) { - for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { - err = ufshcd_query_flag(hba, - UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); - if (!err || err == -ETIMEDOUT) - break; - dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, - err); - } - } + /* poll for max. 1000 iterations for fDeviceInit flag to clear */ + for (i = 0; i < 1000 && !err && flag_res; i++) + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); + if (err) dev_err(hba->dev, "%s reading fDeviceInit flag failed with error %d\n", @@ -2563,7 +2902,7 @@ out: * To bring UFS host controller to operational state, * 1. Enable required interrupts * 2. Configure interrupt aggregation - * 3. Program UTRL and UTMRL base addres + * 3. Program UTRL and UTMRL base address * 4. Configure run-stop-registers * * Returns 0 on success, non-zero value on failure @@ -2592,9 +2931,14 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), REG_UTP_TASK_REQ_LIST_BASE_H); + /* + * Make sure base address and interrupt setup are updated before + * enabling the run/stop registers below. + */ + wmb(); + /* * UCRDY, UTMRLDY and UTRLRDY bits must be 1 - * DEI, HEI bits must be 0 */ reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); if (!(ufshcd_get_lists_status(reg))) { @@ -2610,6 +2954,23 @@ out: return err; } +/** + * ufshcd_hba_stop - Send controller to reset state + * @hba: per adapter instance + * @can_sleep: perform sleep or just spin + */ +static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep) +{ + int err; + + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); + err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, + CONTROLLER_ENABLE, CONTROLLER_DISABLE, + 10, 1, can_sleep); + if (err) + dev_err(hba->dev, "%s: Controller disable failed\n", __func__); +} + /** * ufshcd_hba_enable - initialize the controller * @hba: per adapter instance @@ -2630,18 +2991,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) * development and testing of this driver. msleep can be changed to * mdelay and retry count can be reduced based on the controller. */ - if (!ufshcd_is_hba_active(hba)) { - + if (!ufshcd_is_hba_active(hba)) /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); - - /* - * This delay is based on the testing done with UFS host - * controller FPGA. The delay can be changed based on the - * host controller used. 
- */ - msleep(5); - } + ufshcd_hba_stop(hba, true); /* UniPro link is disabled at this point */ ufshcd_set_link_off(hba); @@ -3090,7 +3442,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) scsi_status = result & MASK_SCSI_STATUS; result = ufshcd_scsi_cmd_status(lrbp, scsi_status); - if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) + /* + * Currently we are only supporting BKOPs exception + * events hence we can ignore BKOPs exception event + * during power management callbacks. BKOPs exception + * event is not expected to be raised in runtime suspend + * callback as it allows the urgent bkops. + * During system suspend, we are anyway forcefully + * disabling the bkops and if urgent bkops is needed + * it will be enabled on system resume. Long term + * solution could be to abort the system suspend if + * UFS device needs urgent BKOPs. + */ + if (!hba->pm_op_in_progress && + ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) schedule_work(&hba->eeh_work); break; case UPIU_TRANSACTION_REJECT_UPIU: @@ -3149,31 +3514,18 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) } /** - * ufshcd_transfer_req_compl - handle SCSI and query command completion + * __ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance + * @completed_reqs: requests to complete */ -static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, + unsigned long completed_reqs) { struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; - unsigned long completed_reqs; - u32 tr_doorbell; int result; int index; - /* Resetting interrupt aggregation counters first and reading the - * DOOR_BELL afterward allows us to handle all the completed requests. - * In order to prevent other interrupts starvation the DB is read once - * after reset. The down side of this solution is the possibility of - * false interrupt if device completes another request after resetting - * aggregation and before reading the DB. - */ - if (ufshcd_is_intr_aggr_allowed(hba)) - ufshcd_reset_intr_aggr(hba); - - tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - completed_reqs = tr_doorbell ^ hba->outstanding_reqs; - for_each_set_bit(index, &completed_reqs, hba->nutrs) { lrbp = &hba->lrb[index]; cmd = lrbp->cmd; @@ -3202,6 +3554,31 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) wake_up(&hba->dev_cmd.tag_wq); } +/** + * ufshcd_transfer_req_compl - handle SCSI and query command completion + * @hba: per adapter instance + */ +static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +{ + unsigned long completed_reqs; + u32 tr_doorbell; + + /* Resetting interrupt aggregation counters first and reading the + * DOOR_BELL afterward allows us to handle all the completed requests. + * In order to prevent other interrupts starvation the DB is read once + * after reset. The down side of this solution is the possibility of + * false interrupt if device completes another request after resetting + * aggregation and before reading the DB. 
+ */ + if (ufshcd_is_intr_aggr_allowed(hba)) + ufshcd_reset_intr_aggr(hba); + + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = tr_doorbell ^ hba->outstanding_reqs; + + __ufshcd_transfer_req_compl(hba, completed_reqs); +} + /** * ufshcd_disable_ee - disable exception event * @hba: per-adapter instance @@ -3222,7 +3599,7 @@ static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) val = hba->ee_ctrl_mask & ~mask; val &= 0xFFFF; /* 2 bytes */ - err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); if (!err) hba->ee_ctrl_mask &= ~mask; @@ -3250,7 +3627,7 @@ static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) val = hba->ee_ctrl_mask | mask; val &= 0xFFFF; /* 2 bytes */ - err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); if (!err) hba->ee_ctrl_mask |= mask; @@ -3276,7 +3653,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) if (hba->auto_bkops_enabled) goto out; - err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, QUERY_FLAG_IDN_BKOPS_EN, NULL); if (err) { dev_err(hba->dev, "%s: failed to enable bkops %d\n", @@ -3325,7 +3702,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) goto out; } - err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, + err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, QUERY_FLAG_IDN_BKOPS_EN, NULL); if (err) { dev_err(hba->dev, "%s: failed to disable bkops %d\n", @@ -3356,7 +3733,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) { - return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status); } @@ -3414,15 +3791,52 @@ out: */ static int ufshcd_urgent_bkops(struct ufs_hba *hba) { - return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); + return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); } static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) { - return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); } +static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) +{ + int err; + u32 curr_status = 0; + + if (hba->is_urgent_bkops_lvl_checked) + goto enable_auto_bkops; + + err = ufshcd_get_bkops_status(hba, &curr_status); + if (err) { + dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", + __func__, err); + goto out; + } + + /* + * We are seeing that some devices are raising the urgent bkops + * exception events even when the BKOPS status doesn't indicate a + * performance impact or critical state. Handle these devices by + * determining their urgent bkops status at runtime.
+ */ + if (curr_status < BKOPS_STATUS_PERF_IMPACT) { + dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", + __func__, curr_status); + /* update the current status as the urgent bkops level */ + hba->urgent_bkops_lvl = curr_status; + hba->is_urgent_bkops_lvl_checked = true; + } + +enable_auto_bkops: + err = ufshcd_enable_auto_bkops(hba); +out: + if (err < 0) + dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", + __func__, err); +} + /** * ufshcd_exception_event_handler - handle exceptions raised by device * @work: pointer to work data @@ -3446,17 +3860,95 @@ static void ufshcd_exception_event_handler(struct work_struct *work) } status &= hba->ee_ctrl_mask; - if (status & MASK_EE_URGENT_BKOPS) { - err = ufshcd_urgent_bkops(hba); - if (err < 0) - dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", - __func__, err); - } + + if (status & MASK_EE_URGENT_BKOPS) + ufshcd_bkops_exception_event_handler(hba); + out: pm_runtime_put_sync(hba->dev); return; } +/* Complete requests that have door-bell cleared */ +static void ufshcd_complete_requests(struct ufs_hba *hba) +{ + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); +} + +/** + * ufshcd_quirk_dl_nac_errors - This function checks if error handling is + * to recover from the DL NAC errors or not. + * @hba: per-adapter instance + * + * Returns true if error handling is required, false otherwise + */ +static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) +{ + unsigned long flags; + bool err_handling = true; + + spin_lock_irqsave(hba->host->host_lock, flags); + /* + * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the + * device fatal error and/or DL NAC & REPLAY timeout errors. + */ + if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) + goto out; + + if ((hba->saved_err & DEVICE_FATAL_ERROR) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) + goto out; + + if ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { + int err; + /* + * wait for 50ms to see if we can get any other errors or not. + */ + spin_unlock_irqrestore(hba->host->host_lock, flags); + msleep(50); + spin_lock_irqsave(hba->host->host_lock, flags); + + /* + * now check if we have got any other severe errors other than + * DL NAC error? + */ + if ((hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) + goto out; + + /* + * As DL NAC is the only error received so far, send out NOP + * command to confirm if link is still active or not. + * - If we don't get any response then do error recovery. + * - If we get response then clear the DL NAC error bit. 
+ */ + + spin_unlock_irqrestore(hba->host->host_lock, flags); + err = ufshcd_verify_dev_init(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + + if (err) + goto out; + + /* Link seems to be alive hence ignore the DL NAC errors */ + if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) + hba->saved_err &= ~UIC_ERROR; + /* clear NAC error */ + hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + if (!hba->saved_uic_err) { + err_handling = false; + goto out; + } + } +out: + spin_unlock_irqrestore(hba->host->host_lock, flags); + return err_handling; +} + /** * ufshcd_err_handler - handle UFS errors that require s/w attention * @work: pointer to work structure @@ -3469,6 +3961,7 @@ static void ufshcd_err_handler(struct work_struct *work) u32 err_tm = 0; int err = 0; int tag; + bool needs_reset = false; hba = container_of(work, struct ufs_hba, eh_work); @@ -3476,40 +3969,86 @@ static void ufshcd_err_handler(struct work_struct *work) ufshcd_hold(hba, false); spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ufshcd_state == UFSHCD_STATE_RESET) { - spin_unlock_irqrestore(hba->host->host_lock, flags); + if (hba->ufshcd_state == UFSHCD_STATE_RESET) goto out; - } hba->ufshcd_state = UFSHCD_STATE_RESET; ufshcd_set_eh_in_progress(hba); /* Complete requests that have door-bell cleared by h/w */ - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_complete_requests(hba); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + bool ret; + + spin_unlock_irqrestore(hba->host->host_lock, flags); + /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */ + ret = ufshcd_quirk_dl_nac_errors(hba); + spin_lock_irqsave(hba->host->host_lock, flags); + if (!ret) + goto skip_err_handling; + } + if ((hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR | + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | + UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) + needs_reset = true; + + /* + * if host reset is required then skip clearing the pending + * transfers forcefully because they will automatically get + * cleared after link startup. 
+ */ + if (needs_reset) + goto skip_pending_xfer_clear; + /* release lock as clear command might sleep */ spin_unlock_irqrestore(hba->host->host_lock, flags); /* Clear pending transfer requests */ - for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) - if (ufshcd_clear_cmd(hba, tag)) - err_xfer |= 1 << tag; + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { + if (ufshcd_clear_cmd(hba, tag)) { + err_xfer = true; + goto lock_skip_pending_xfer_clear; + } + } /* Clear pending task management requests */ - for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) - if (ufshcd_clear_tm_cmd(hba, tag)) - err_tm |= 1 << tag; + for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { + if (ufshcd_clear_tm_cmd(hba, tag)) { + err_tm = true; + goto lock_skip_pending_xfer_clear; + } + } - /* Complete the requests that are cleared by s/w */ +lock_skip_pending_xfer_clear: spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Complete the requests that are cleared by s/w */ + ufshcd_complete_requests(hba); + + if (err_xfer || err_tm) + needs_reset = true; + +skip_pending_xfer_clear: /* Fatal errors need reset */ - if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || - ((hba->saved_err & UIC_ERROR) && - (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { + if (needs_reset) { + unsigned long max_doorbells = (1UL << hba->nutrs) - 1; + + /* + * ufshcd_reset_and_restore() does the link reinitialization + * which will need at least one empty doorbell slot to send the + * device management commands (NOP and query commands). + * If there is no slot empty at this moment then free up last + * slot forcefully. + */ + if (hba->outstanding_reqs == max_doorbells) + __ufshcd_transfer_req_compl(hba, + (1UL << (hba->nutrs - 1))); + + spin_unlock_irqrestore(hba->host->host_lock, flags); err = ufshcd_reset_and_restore(hba); + spin_lock_irqsave(hba->host->host_lock, flags); if (err) { dev_err(hba->dev, "%s: reset and restore failed\n", __func__); @@ -3523,9 +4062,19 @@ static void ufshcd_err_handler(struct work_struct *work) hba->saved_err = 0; hba->saved_uic_err = 0; } + +skip_err_handling: + if (!needs_reset) { + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + if (hba->saved_err || hba->saved_uic_err) + dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", + __func__, hba->saved_err, hba->saved_uic_err); + } + ufshcd_clear_eh_in_progress(hba); out: + spin_unlock_irqrestore(hba->host->host_lock, flags); scsi_unblock_requests(hba->host); ufshcd_release(hba); pm_runtime_put_sync(hba->dev); @@ -3543,6 +4092,14 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba) reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + else if (hba->dev_quirks & + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) + hba->uic_error |= + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + } /* UIC NL/TL/DME errors needs software retry */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); @@ -3580,15 +4137,18 @@ static void ufshcd_check_errors(struct ufs_hba *hba) } if (queue_eh_work) { + /* + * update the transfer error masks to sticky bits, let's do this + * irrespective of current ufshcd_state.
+ */ + hba->saved_err |= hba->errors; + hba->saved_uic_err |= hba->uic_error; + /* handle fatal errors only when link is functional */ if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { /* block commands from scsi mid-layer */ scsi_block_requests(hba->host); - /* transfer error masks to sticky bits */ - hba->saved_err |= hba->errors; - hba->saved_uic_err |= hba->uic_error; - hba->ufshcd_state = UFSHCD_STATE_ERROR; schedule_work(&hba->eh_work); } @@ -3645,16 +4205,20 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { - u32 intr_status; + u32 intr_status, enabled_intr_status; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; spin_lock(hba->host->host_lock); intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + enabled_intr_status = + intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); - if (intr_status) { + if (intr_status) ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); - ufshcd_sl_intr(hba, intr_status); + + if (enabled_intr_status) { + ufshcd_sl_intr(hba, enabled_intr_status); retval = IRQ_HANDLED; } spin_unlock(hba->host->host_lock); @@ -3677,7 +4241,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) /* poll for max. 1 sec to clear door bell register by h/w */ err = ufshcd_wait_for_register(hba, REG_UTP_TASK_REQ_DOOR_BELL, - mask, 0, 1000, 1000); + mask, 0, 1000, 1000, true); out: return err; } @@ -3740,6 +4304,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, /* send command to the controller */ __set_bit(free_slot, &hba->outstanding_tasks); + + /* Make sure descriptors are ready before ringing the task doorbell */ + wmb(); + ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL); spin_unlock_irqrestore(host->host_lock, flags); @@ -3845,13 +4413,23 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) host = cmd->device->host; hba = shost_priv(host); tag = cmd->request->tag; + if (!ufshcd_valid_tag(hba, tag)) { + dev_err(hba->dev, + "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", + __func__, tag, cmd, cmd->request); + BUG(); + } ufshcd_hold(hba, false); + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* If command is already aborted/completed, return SUCCESS */ - if (!(test_bit(tag, &hba->outstanding_reqs))) + if (!(test_bit(tag, &hba->outstanding_reqs))) { + dev_err(hba->dev, + "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", + __func__, tag, hba->outstanding_reqs, reg); goto out; + } - reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); if (!(reg & (1 << tag))) { dev_err(hba->dev, "%s: cmd was completed, but without a notifying intr, tag = %d", @@ -3905,7 +4483,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); - __clear_bit(tag, &hba->outstanding_reqs); + ufshcd_outstanding_req_clear(hba, tag); hba->lrb[tag].cmd = NULL; spin_unlock_irqrestore(host->host_lock, flags); @@ -3945,7 +4523,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) /* Reset the host controller */ spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_hba_stop(hba); + ufshcd_hba_stop(hba, false); spin_unlock_irqrestore(hba->host->host_lock, flags); err = ufshcd_hba_enable(hba); @@ -4155,9 +4733,9 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, hba->init_prefetch_data.icc_level); - ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, - 
QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, - &hba->init_prefetch_data.icc_level); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, + &hba->init_prefetch_data.icc_level); if (ret) dev_err(hba->dev, @@ -4232,6 +4810,164 @@ out: return ret; } +static int ufs_get_device_info(struct ufs_hba *hba, + struct ufs_device_info *card_data) +{ + int err; + u8 model_index; + u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0}; + u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; + + err = ufshcd_read_device_desc(hba, desc_buf, + QUERY_DESC_DEVICE_MAX_SIZE); + if (err) { + dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", + __func__, err); + goto out; + } + + /* + * getting vendor (manufacturerID) and Bank Index in big endian + * format + */ + card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; + + model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + + err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, + QUERY_DESC_STRING_MAX_SIZE, ASCII_STD); + if (err) { + dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", + __func__, err); + goto out; + } + + str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; + strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), + min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], + MAX_MODEL_LEN)); + + /* Null terminate the model string */ + card_data->model[MAX_MODEL_LEN] = '\0'; + +out: + return err; +} + +void ufs_advertise_fixup_device(struct ufs_hba *hba) +{ + int err; + struct ufs_dev_fix *f; + struct ufs_device_info card_data; + + card_data.wmanufacturerid = 0; + + err = ufs_get_device_info(hba, &card_data); + if (err) { + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", + __func__, err); + return; + } + + for (f = ufs_fixups; f->quirk; f++) { + if (((f->card.wmanufacturerid == card_data.wmanufacturerid) || + (f->card.wmanufacturerid == UFS_ANY_VENDOR)) && + (STR_PRFX_EQUAL(f->card.model, card_data.model) || + !strcmp(f->card.model, UFS_ANY_MODEL))) + hba->dev_quirks |= f->quirk; + } +} + +/** + * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro + * @hba: per-adapter instance + * + * PA_TActivate parameter can be tuned manually if UniPro version is less than + * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's + * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce + * the hibern8 exit latency. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) +{ + int ret = 0; + u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL( + RX_MIN_ACTIVATETIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_min_activatetime); + if (ret) + goto out; + + /* make sure proper unit conversion is applied */ + tuned_pa_tactivate = + ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) + / PA_TACTIVATE_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + tuned_pa_tactivate); + +out: + return ret; +} + +/** + * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro + * @hba: per-adapter instance + * + * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than + * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's + * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY. + * This optimal value can help reduce the hibern8 exit latency. 
+ * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) +{ + int ret = 0; + u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0; + u32 max_hibern8_time, tuned_pa_hibern8time; + + ret = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &local_tx_hibern8_time_cap); + if (ret) + goto out; + + ret = ufshcd_dme_peer_get(hba, + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &peer_rx_hibern8_time_cap); + if (ret) + goto out; + + max_hibern8_time = max(local_tx_hibern8_time_cap, + peer_rx_hibern8_time_cap); + /* make sure proper unit conversion is applied */ + tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US) + / PA_HIBERN8_TIME_UNIT_US); + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + tuned_pa_hibern8time); +out: + return ret; +} + +static void ufshcd_tune_unipro_params(struct ufs_hba *hba) +{ + if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { + ufshcd_tune_pa_tactivate(hba); + ufshcd_tune_pa_hibern8time(hba); + } + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) + /* set 1ms timeout for PA_TACTIVATE */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance @@ -4248,6 +4984,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_init_pwr_info(hba); + /* set the default level for urgent bkops */ + hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; + hba->is_urgent_bkops_lvl_checked = false; + /* UniPro link is active now */ ufshcd_set_link_active(hba); @@ -4259,10 +4999,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; + ufs_advertise_fixup_device(hba); + ufshcd_tune_unipro_params(hba); + + ret = ufshcd_set_vccq_rail_unused(hba, + (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? 
true : false); + if (ret) + goto out; + /* UFS device is also active now */ ufshcd_set_ufs_dev_active(hba); ufshcd_force_reset_auto_bkops(hba); - hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; hba->wlun_dev_clr_ua = true; if (ufshcd_get_max_pwr_mode(hba)) { @@ -4276,6 +5023,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) __func__, ret); } + /* set the state as operational after switching to desired gear */ + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; /* * If we are in error handling context or in power management callbacks * context, no need to scan the host @@ -4285,8 +5034,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* clear any previous UFS device information */ memset(&hba->dev_info, 0, sizeof(hba->dev_info)); - if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) + if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) hba->dev_info.f_power_on_wp_en = flag; if (!hba->is_init_prefetch) @@ -4332,6 +5081,41 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) ufshcd_probe_hba(hba); } +static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd) +{ + unsigned long flags; + struct Scsi_Host *host; + struct ufs_hba *hba; + int index; + bool found = false; + + if (!scmd || !scmd->device || !scmd->device->host) + return BLK_EH_NOT_HANDLED; + + host = scmd->device->host; + hba = shost_priv(host); + if (!hba) + return BLK_EH_NOT_HANDLED; + + spin_lock_irqsave(host->host_lock, flags); + + for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) { + if (hba->lrb[index].cmd == scmd) { + found = true; + break; + } + } + + spin_unlock_irqrestore(host->host_lock, flags); + + /* + * Bypass SCSI error handling and reset the block layer timer if this + * SCSI command was not actually dispatched to UFS driver, otherwise + * let SCSI layer handle the error as usual. + */ + return found ? 
BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER; +} + static struct scsi_host_template ufshcd_driver_template = { .module = THIS_MODULE, .name = UFSHCD, @@ -4344,6 +5128,7 @@ static struct scsi_host_template ufshcd_driver_template = { .eh_abort_handler = ufshcd_abort, .eh_device_reset_handler = ufshcd_eh_device_reset_handler, .eh_host_reset_handler = ufshcd_eh_host_reset_handler, + .eh_timed_out = ufshcd_eh_timed_out, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, @@ -4372,13 +5157,24 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { - return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); + if (!vreg) + return 0; + else if (vreg->unused) + return 0; + else + return ufshcd_config_vreg_load(hba->dev, vreg, + UFS_VREG_LPM_LOAD_UA); } static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { - return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); + if (!vreg) + return 0; + else if (vreg->unused) + return 0; + else + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } static int ufshcd_config_vreg(struct device *dev, @@ -4413,7 +5209,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) { int ret = 0; - if (!vreg || vreg->enabled) + if (!vreg) + goto out; + else if (vreg->enabled || vreg->unused) goto out; ret = ufshcd_config_vreg(dev, vreg, true); @@ -4433,7 +5231,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) { int ret = 0; - if (!vreg || !vreg->enabled) + if (!vreg) + goto out; + else if (!vreg->enabled || vreg->unused) goto out; ret = regulator_disable(vreg->reg); @@ -4539,6 +5339,36 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba) return 0; } +static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused) +{ + int ret = 0; + struct ufs_vreg_info *info = &hba->vreg_info; + + if (!info) + goto out; + else if (!info->vccq) + goto out; + + if (unused) { + /* shut off the rail here */ + ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false); + /* + * Mark this rail as no longer used, so it doesn't get enabled + * later by mistake + */ + if (!ret) + info->vccq->unused = true; + } else { + /* + * rail should have been already enabled hence just make sure + * that unused flag is cleared. + */ + info->vccq->unused = false; + } +out: + return ret; +} + static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk) { @@ -4857,11 +5687,21 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba, else if ((req_link_state == UIC_LINK_OFF_STATE) && (!check_for_bkops || (check_for_bkops && !hba->auto_bkops_enabled))) { + /* + * Let's make sure that link is in low power mode; we are doing + * this currently by putting the link in Hibern8. Another way to + * put the link in low power mode is to send the DME end point + * to device and then send the DME reset command to local + * unipro. But putting the link in hibern8 is much faster.
+ */ + ret = ufshcd_uic_hibern8_enter(hba); + if (ret) + goto out; /* * Change controller state to "reset state" which * should also put the link in off/reset state */ - ufshcd_hba_stop(hba); + ufshcd_hba_stop(hba, true); /* * TODO: Check if we need any delay to make sure that * controller is reset @@ -4875,6 +5715,16 @@ out: } static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) { + /* + * It seems some UFS devices may keep drawing more than sleep current + * (at least for 500us) from UFS rails (especially from VCCQ rail). + * To avoid this situation, add 2ms delay before putting these UFS + * rails in LPM mode. + */ + if (!ufshcd_is_link_active(hba) && + hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) + usleep_range(2000, 2100); + /* * If UFS device is in UFS_Sleep, turn off VCC rail to save some * power. @@ -5337,7 +6187,7 @@ void ufshcd_remove(struct ufs_hba *hba) scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); - ufshcd_hba_stop(hba); + ufshcd_hba_stop(hba, true); scsi_host_put(hba->host); @@ -5601,6 +6451,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) init_waitqueue_head(&hba->dev_cmd.tag_wq); ufshcd_init_clk_gating(hba); + + /* + * In order to avoid any spurious interrupt immediately after + * registering UFS controller interrupt handler, clear any pending UFS + * interrupt status and disable all the UFS interrupts. + */ + ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), + REG_INTERRUPT_STATUS); + ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); + /* + * Make sure that UFS interrupts are disabled and any pending interrupt + * status is cleared before registering UFS interrupt handler. + */ + mb(); + /* IRQ registration */ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); if (err) { -- cgit v1.2.3-54-g00ecf
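
A note on the recurring shape in this patch: ufshcd_query_flag_retry(), ufshcd_query_attr_retry() and ufshcd_query_descriptor_retry() all wrap a one-shot query helper in the same bounded retry loop, logging transient failures at debug level and giving up early on errors that retrying cannot fix (the descriptor variant breaks out on -EINVAL). A minimal userspace sketch of that shape follows; retry_query(), MY_QUERY_RETRIES, flaky_query() and the hard-coded errno values are illustrative stand-ins, not identifiers from the patch.

	#include <stdio.h>

	#define MY_QUERY_RETRIES 10	/* stand-in for QUERY_REQ_RETRIES */

	/* Retry a query-style operation, giving up early on permanent failure. */
	static int retry_query(int (*query_fn)(void *ctx), void *ctx)
	{
		int ret = 0;
		int retries;

		for (retries = MY_QUERY_RETRIES; retries > 0; retries--) {
			ret = query_fn(ctx);
			if (!ret || ret == -22)	/* -EINVAL: not worth retrying */
				break;
			/* transient failure: log and try again */
			fprintf(stderr, "query failed (%d), %d retries left\n",
				ret, retries - 1);
		}
		return ret;
	}

	/* Example operation that fails twice before succeeding. */
	static int flaky_query(void *ctx)
	{
		int *calls = ctx;

		return ++(*calls) < 3 ? -5 /* -EIO */ : 0;
	}

	int main(void)
	{
		int calls = 0;

		return retry_query(flaky_query, &calls);	/* exits 0 */
	}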