| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-09-11 04:34:46 -0300 |
|---|---|---|
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-09-11 04:34:46 -0300 |
| commit | 863981e96738983919de841ec669e157e6bdaeb0 (patch) | |
| tree | d6d89a12e7eb8017837c057935a2271290907f76 /drivers/ata | |
| parent | 8dec7c70575785729a6a9e6719a955e9c545bcab (diff) | |
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/ata')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | drivers/ata/Kconfig | 13 |
| -rw-r--r-- | drivers/ata/ahci_seattle.c | 2 |
| -rw-r--r-- | drivers/ata/libahci.c | 4 |
| -rw-r--r-- | drivers/ata/libata-core.c | 243 |
| -rw-r--r-- | drivers/ata/libata-eh.c | 113 |
| -rw-r--r-- | drivers/ata/libata-scsi.c | 761 |
| -rw-r--r-- | drivers/ata/libata-trace.c | 72 |
| -rw-r--r-- | drivers/ata/libata.h | 8 |
| -rw-r--r-- | drivers/ata/pata_icside.c | 2 |
| -rw-r--r-- | drivers/ata/sata_dwc_460ex.c | 552 |
| -rw-r--r-- | drivers/ata/sata_highbank.c | 2 |
| -rw-r--r-- | drivers/ata/sata_mv.c | 2 |
12 files changed, 1311 insertions, 463 deletions
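Most of the new code in this commit lands in drivers/ata/libata-scsi.c, which gains SCSI-to-ATA translation for zoned (ZAC/ZBC) devices: REPORT ZONES is mapped onto ZAC MANAGEMENT IN and the zone-management service actions onto ZAC MANAGEMENT OUT, with the 48-bit starting LBA and, for REPORT ZONES, the transfer length in 512-byte sectors split across the low- and high-order taskfile registers. The sketch below is a standalone, user-space illustration of that field packing, mirroring what the patch does in ata_scsi_zbc_in_xlat()/ata_scsi_zbc_out_xlat(); the struct and function names here are local stand-ins, not the kernel's struct ata_taskfile.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for the taskfile fields the patch fills in;
 * the kernel's struct ata_taskfile has many more members.
 */
struct tf_lba48 {
	uint8_t lbal, lbam, lbah;             /* LBA bits 0..23  */
	uint8_t hob_lbal, hob_lbam, hob_lbah; /* LBA bits 24..47 */
	uint8_t nsect, hob_nsect;             /* sector count, low/high byte */
};

/*
 * Pack a 48-bit LBA and a 16-bit sector count the same way the
 * ZAC MANAGEMENT IN/OUT translation in this commit does.
 */
static void tf_pack_lba48(struct tf_lba48 *tf, uint64_t block, uint16_t sect)
{
	tf->lbal      = block & 0xff;
	tf->lbam      = (block >> 8) & 0xff;
	tf->lbah      = (block >> 16) & 0xff;
	tf->hob_lbal  = (block >> 24) & 0xff;
	tf->hob_lbam  = (block >> 32) & 0xff;
	tf->hob_lbah  = (block >> 40) & 0xff;
	tf->nsect     = sect & 0xff;
	tf->hob_nsect = (sect >> 8) & 0xff;
}

int main(void)
{
	struct tf_lba48 tf;

	/* e.g. REPORT ZONES starting at LBA 0x123456789ab, 128 sectors (64 KiB) */
	tf_pack_lba48(&tf, 0x123456789abULL, 128);
	printf("lba bytes (hi..lo): %02x %02x %02x %02x %02x %02x, nsect: %02x %02x\n",
	       tf.hob_lbah, tf.hob_lbam, tf.hob_lbal,
	       tf.lbah, tf.lbam, tf.lbal, tf.hob_nsect, tf.nsect);
	return 0;
}
```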
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index cfa936a32..e2dc4c045 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -313,14 +313,23 @@ config ATA_PIIX config SATA_DWC tristate "DesignWare Cores SATA support" - depends on 460EX - select DW_DMAC + depends on DMADEVICES + select GENERIC_PHY help This option enables support for the on-chip SATA controller of the AppliedMicro processor 460EX. If unsure, say N. +config SATA_DWC_OLD_DMA + bool "Support old device trees" + depends on SATA_DWC + select DW_DMAC_CORE + default y if 460EX + help + This option enables support for old device trees without the + "dmas" property. + config SATA_DWC_DEBUG bool "Debugging driver version" depends on SATA_DWC diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c index 6e702ab57..1d31c0c0f 100644 --- a/drivers/ata/ahci_seattle.c +++ b/drivers/ata/ahci_seattle.c @@ -137,7 +137,7 @@ static const struct ata_port_info *ahci_seattle_get_port_info( u32 val; plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL); - if (IS_ERR(plat_data)) + if (!plat_data) return &ahci_port_info; plat_data->sgpio_ctrl = devm_ioremap_resource(dev, diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index a5d7c1c2a..71b07198e 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2550,8 +2550,8 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht) if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) { if (hpriv->irq_handler) - dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \ - and custom irq handler implemented\n"); + dev_warn(host->dev, + "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n"); rc = ahci_host_activate_multi_irqs(host, sht); } else { diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 474f9f473..0506c49da 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -66,6 +66,7 @@ #include <scsi/scsi_host.h> #include <linux/libata.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> #include <linux/cdrom.h> #include <linux/ratelimit.h> #include <linux/pm_runtime.h> @@ -695,7 +696,7 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) * RETURNS: * Block address read from @tf. */ -u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) +u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev) { u64 block = 0; @@ -720,7 +721,7 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) if (!sect) { ata_dev_warn(dev, "device reported invalid CHS sector 0\n"); - sect = 1; /* oh well */ + return U64_MAX; } block = (cyl * dev->heads + head) * dev->sectors + sect - 1; @@ -884,7 +885,7 @@ unsigned long ata_pack_xfermask(unsigned long pio_mask, * @udma_mask: resulting udma_mask * * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. - * Any NULL distination masks will be ignored. + * Any NULL destination masks will be ignored. 
*/ void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, unsigned long *mwdma_mask, unsigned long *udma_mask) @@ -2079,6 +2080,81 @@ static inline u8 ata_dev_knobble(struct ata_device *dev) return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); } +static void ata_dev_config_ncq_send_recv(struct ata_device *dev) +{ + struct ata_port *ap = dev->link->ap; + unsigned int err_mask; + int log_index = ATA_LOG_NCQ_SEND_RECV * 2; + u16 log_pages; + + err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, + 0, ap->sector_buf, 1); + if (err_mask) { + ata_dev_dbg(dev, + "failed to get Log Directory Emask 0x%x\n", + err_mask); + return; + } + log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); + if (!log_pages) { + ata_dev_warn(dev, + "NCQ Send/Recv Log not supported\n"); + return; + } + err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, + 0, ap->sector_buf, 1); + if (err_mask) { + ata_dev_dbg(dev, + "failed to get NCQ Send/Recv Log Emask 0x%x\n", + err_mask); + } else { + u8 *cmds = dev->ncq_send_recv_cmds; + + dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; + memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); + + if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { + ata_dev_dbg(dev, "disabling queued TRIM support\n"); + cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= + ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; + } + } +} + +static void ata_dev_config_ncq_non_data(struct ata_device *dev) +{ + struct ata_port *ap = dev->link->ap; + unsigned int err_mask; + int log_index = ATA_LOG_NCQ_NON_DATA * 2; + u16 log_pages; + + err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, + 0, ap->sector_buf, 1); + if (err_mask) { + ata_dev_dbg(dev, + "failed to get Log Directory Emask 0x%x\n", + err_mask); + return; + } + log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); + if (!log_pages) { + ata_dev_warn(dev, + "NCQ Send/Recv Log not supported\n"); + return; + } + err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA, + 0, ap->sector_buf, 1); + if (err_mask) { + ata_dev_dbg(dev, + "failed to get NCQ Non-Data Log Emask 0x%x\n", + err_mask); + } else { + u8 *cmds = dev->ncq_non_data_cmds; + + memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE); + } +} + static int ata_dev_config_ncq(struct ata_device *dev, char *desc, size_t desc_sz) { @@ -2127,29 +2203,125 @@ static int ata_dev_config_ncq(struct ata_device *dev, snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, ddepth, aa_desc); - if ((ap->flags & ATA_FLAG_FPDMA_AUX) && - ata_id_has_ncq_send_and_recv(dev->id)) { - err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, - 0, ap->sector_buf, 1); - if (err_mask) { - ata_dev_dbg(dev, - "failed to get NCQ Send/Recv Log Emask 0x%x\n", - err_mask); - } else { - u8 *cmds = dev->ncq_send_recv_cmds; + if ((ap->flags & ATA_FLAG_FPDMA_AUX)) { + if (ata_id_has_ncq_send_and_recv(dev->id)) + ata_dev_config_ncq_send_recv(dev); + if (ata_id_has_ncq_non_data(dev->id)) + ata_dev_config_ncq_non_data(dev); + } + + return 0; +} - dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; - memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); +static void ata_dev_config_sense_reporting(struct ata_device *dev) +{ + unsigned int err_mask; - if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { - ata_dev_dbg(dev, "disabling queued TRIM support\n"); - cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= - ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; - } + if (!ata_id_has_sense_reporting(dev->id)) + return; + + if (ata_id_sense_reporting_enabled(dev->id)) + return; + + err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1); + if (err_mask) { 
+ ata_dev_dbg(dev, + "failed to enable Sense Data Reporting, Emask 0x%x\n", + err_mask); + } +} + +static void ata_dev_config_zac(struct ata_device *dev) +{ + struct ata_port *ap = dev->link->ap; + unsigned int err_mask; + u8 *identify_buf = ap->sector_buf; + int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0; + u16 log_pages; + + dev->zac_zones_optimal_open = U32_MAX; + dev->zac_zones_optimal_nonseq = U32_MAX; + dev->zac_zones_max_open = U32_MAX; + + /* + * Always set the 'ZAC' flag for Host-managed devices. + */ + if (dev->class == ATA_DEV_ZAC) + dev->flags |= ATA_DFLAG_ZAC; + else if (ata_id_zoned_cap(dev->id) == 0x01) + /* + * Check for host-aware devices. + */ + dev->flags |= ATA_DFLAG_ZAC; + + if (!(dev->flags & ATA_DFLAG_ZAC)) + return; + + /* + * Read Log Directory to figure out if IDENTIFY DEVICE log + * is supported. + */ + err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, + 0, ap->sector_buf, 1); + if (err_mask) { + ata_dev_info(dev, + "failed to get Log Directory Emask 0x%x\n", + err_mask); + return; + } + log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); + if (log_pages == 0) { + ata_dev_warn(dev, + "ATA Identify Device Log not supported\n"); + return; + } + /* + * Read IDENTIFY DEVICE data log, page 0, to figure out + * if page 9 is supported. + */ + err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0, + identify_buf, 1); + if (err_mask) { + ata_dev_info(dev, + "failed to get Device Identify Log Emask 0x%x\n", + err_mask); + return; + } + log_pages = identify_buf[8]; + for (i = 0; i < log_pages; i++) { + if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) { + found++; + break; } } + if (!found) { + ata_dev_warn(dev, + "ATA Zoned Information Log not supported\n"); + return; + } - return 0; + /* + * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information) + */ + err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, + ATA_LOG_ZONED_INFORMATION, + identify_buf, 1); + if (!err_mask) { + u64 zoned_cap, opt_open, opt_nonseq, max_open; + + zoned_cap = get_unaligned_le64(&identify_buf[8]); + if ((zoned_cap >> 63)) + dev->zac_zoned_cap = (zoned_cap & 1); + opt_open = get_unaligned_le64(&identify_buf[24]); + if ((opt_open >> 63)) + dev->zac_zones_optimal_open = (u32)opt_open; + opt_nonseq = get_unaligned_le64(&identify_buf[32]); + if ((opt_nonseq >> 63)) + dev->zac_zones_optimal_nonseq = (u32)opt_nonseq; + max_open = get_unaligned_le64(&identify_buf[40]); + if ((max_open >> 63)) + dev->zac_zones_max_open = (u32)max_open; + } } /** @@ -2374,7 +2546,8 @@ int ata_dev_configure(struct ata_device *dev) dev->devslp_timing[i] = sata_setting[j]; } } - + ata_dev_config_sense_reporting(dev); + ata_dev_config_zac(dev); dev->cdb_len = 16; } @@ -3403,7 +3576,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) * EH context. * * RETURNS: - * 0 if @linke is ready before @deadline; otherwise, -errno. + * 0 if @link is ready before @deadline; otherwise, -errno. */ int ata_wait_ready(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) @@ -3484,7 +3657,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, * EH context. * * RETURNS: - * 0 if @linke is ready before @deadline; otherwise, -errno. + * 0 if @link is ready before @deadline; otherwise, -errno. 
*/ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) @@ -3497,7 +3670,7 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, /** * sata_link_debounce - debounce SATA phy status * @link: ATA link to debounce SATA phy status for - * @params: timing parameters { interval, duratinon, timeout } in msec + * @params: timing parameters { interval, duration, timeout } in msec * @deadline: deadline jiffies for the operation * * Make sure SStatus of @link reaches stable state, determined by @@ -3567,7 +3740,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, /** * sata_link_resume - resume SATA link * @link: ATA link to resume SATA - * @params: timing parameters { interval, duratinon, timeout } in msec + * @params: timing parameters { interval, duration, timeout } in msec * @deadline: deadline jiffies for the operation * * Resume SATA phy @link and debounce it. @@ -3750,7 +3923,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline) /** * sata_link_hardreset - reset link via SATA phy reset * @link: link to reset - * @timing: timing parameters { interval, duratinon, timeout } in msec + * @timing: timing parameters { interval, duration, timeout } in msec * @deadline: deadline jiffies for the operation * @online: optional out parameter indicating link onlineness * @check_ready: optional callback to check link readiness @@ -4145,6 +4318,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { */ { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, + /* + * Device times out with higher max sects. + * https://bugzilla.kernel.org/show_bug.cgi?id=121671 + */ + { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, + /* Devices we expect to fail diagnostics */ /* Devices where NCQ should be avoided */ @@ -4532,6 +4711,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) { struct ata_taskfile tf; unsigned int err_mask; + unsigned long timeout = 0; /* set up set-features taskfile */ DPRINTK("set features - SATA features\n"); @@ -4543,7 +4723,10 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) tf.protocol = ATA_PROT_NODATA; tf.nsect = feature; - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + if (enable == SETFEATURES_SPINUP) + timeout = ata_probe_timeout ? + ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT; + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout); DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; @@ -6212,7 +6395,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) * * After allocating an ATA host and initializing it, most libata * LLDs perform three steps to activate the host - start host, - * request IRQ and register it. This helper takes necessasry + * request IRQ and register it. This helper takes necessary * arguments and performs the three steps in one go. 
* * An invalid IRQ skips the IRQ registration and expects the host to @@ -6265,7 +6448,7 @@ int ata_host_activate(struct ata_host *host, int irq, } /** - * ata_port_detach - Detach ATA port in prepration of device removal + * ata_port_detach - Detach ATA port in preparation of device removal * @ap: ATA port to be detached * * Detach all ATA devices and the associated SCSI devices of @ap; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 91a9e6af2..c6f017458 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1600,6 +1600,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, tf->hob_lbah = buf[10]; tf->nsect = buf[12]; tf->hob_nsect = buf[13]; + if (ata_id_has_ncq_autosense(dev->id)) + tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; return 0; } @@ -1636,6 +1638,56 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) } /** + * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT + * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to + * @cmd: scsi command for which the sense code should be set + * + * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK + * SENSE. This function is an EH helper. + * + * LOCKING: + * Kernel thread context (may sleep). + */ +static void ata_eh_request_sense(struct ata_queued_cmd *qc, + struct scsi_cmnd *cmd) +{ + struct ata_device *dev = qc->dev; + struct ata_taskfile tf; + unsigned int err_mask; + + if (qc->ap->pflags & ATA_PFLAG_FROZEN) { + ata_dev_warn(dev, "sense data available but port frozen\n"); + return; + } + + if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID) + return; + + if (!ata_id_sense_reporting_enabled(dev->id)) { + ata_dev_warn(qc->dev, "sense data reporting disabled\n"); + return; + } + + DPRINTK("ATA request sense\n"); + + ata_tf_init(dev, &tf); + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; + tf.command = ATA_CMD_REQ_SENSE_DATA; + tf.protocol = ATA_PROT_NODATA; + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + /* Ignore err_mask; ATA_ERR might be set */ + if (tf.command & ATA_SENSE) { + ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal); + qc->flags |= ATA_QCFLAG_SENSE_VALID; + } else { + ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", + tf.command, err_mask); + } +} + +/** * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE * @dev: device to perform REQUEST_SENSE to * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) @@ -1797,6 +1849,18 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) memcpy(&qc->result_tf, &tf, sizeof(tf)); qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; + if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) { + char sense_key, asc, ascq; + + sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; + asc = (qc->result_tf.auxiliary >> 8) & 0xff; + ascq = qc->result_tf.auxiliary & 0xff; + ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq); + ata_scsi_set_sense_information(dev, qc->scsicmd, + &qc->result_tf); + qc->flags |= ATA_QCFLAG_SENSE_VALID; + } + ehc->i.err_mask &= ~AC_ERR_DEV; } @@ -1826,14 +1890,23 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, return ATA_EH_RESET; } - if (stat & (ATA_ERR | ATA_DF)) + if (stat & (ATA_ERR | ATA_DF)) { qc->err_mask |= AC_ERR_DEV; - else + /* + * Sense data reporting does not work if the + * device fault bit is set. 
+ */ + if (stat & ATA_DF) + stat &= ~ATA_SENSE; + } else { return 0; + } switch (qc->dev->class) { case ATA_DEV_ATA: case ATA_DEV_ZAC: + if (stat & ATA_SENSE) + ata_eh_request_sense(qc, qc->scsicmd); if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; if (err & (ATA_UNC | ATA_AMNF)) @@ -1847,20 +1920,31 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, tmp = atapi_eh_request_sense(qc->dev, qc->scsicmd->sense_buffer, qc->result_tf.feature >> 4); - if (!tmp) { - /* ATA_QCFLAG_SENSE_VALID is used to - * tell atapi_qc_complete() that sense - * data is already valid. - * - * TODO: interpret sense data and set - * appropriate err_mask. - */ + if (!tmp) qc->flags |= ATA_QCFLAG_SENSE_VALID; - } else + else qc->err_mask |= tmp; } } + if (qc->flags & ATA_QCFLAG_SENSE_VALID) { + int ret = scsi_check_sense(qc->scsicmd); + /* + * SUCCESS here means that the sense code could + * evaluated and should be passed to the upper layers + * for correct evaluation. + * FAILED means the sense code could not interpreted + * and the device would need to be reset. + * NEEDS_RETRY and ADD_TO_MLQUEUE means that the + * command would need to be retried. + */ + if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) { + qc->flags |= ATA_QCFLAG_RETRY; + qc->err_mask |= AC_ERR_OTHER; + } else if (ret != SUCCESS) { + qc->err_mask |= AC_ERR_HSM; + } + } if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) action |= ATA_EH_RESET; @@ -2398,6 +2482,8 @@ const char *ata_get_cmd_descript(u8 command) { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, + { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" }, + { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" }, { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, @@ -2569,14 +2655,15 @@ static void ata_eh_link_report(struct ata_link *link) #ifdef CONFIG_ATA_VERBOSE_ERROR if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | - ATA_ERR)) { + ATA_SENSE | ATA_ERR)) { if (res->command & ATA_BUSY) ata_dev_err(qc->dev, "status: { Busy }\n"); else - ata_dev_err(qc->dev, "status: { %s%s%s%s}\n", + ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", res->command & ATA_DRDY ? "DRDY " : "", res->command & ATA_DF ? "DF " : "", res->command & ATA_DRQ ? "DRQ " : "", + res->command & ATA_SENSE ? "SENSE " : "", res->command & ATA_ERR ? 
"ERR " : ""); } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 567859ce0..bfec66fb2 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -270,11 +270,52 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR, ata_scsi_park_show, ata_scsi_park_store); EXPORT_SYMBOL_GPL(dev_attr_unload_heads); -static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) +void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd, + u8 sk, u8 asc, u8 ascq) { + bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); + + if (!cmd) + return; + cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; - scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); + scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq); +} + +void ata_scsi_set_sense_information(struct ata_device *dev, + struct scsi_cmnd *cmd, + const struct ata_taskfile *tf) +{ + u64 information; + + if (!cmd) + return; + + information = ata_tf_read_block(tf, dev); + if (information == U64_MAX) + return; + + scsi_set_sense_information(cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, information); +} + +static void ata_scsi_set_invalid_field(struct ata_device *dev, + struct scsi_cmnd *cmd, u16 field, u8 bit) +{ + ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0); + /* "Invalid field in cbd" */ + scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, + field, bit, 1); +} + +static void ata_scsi_set_invalid_parameter(struct ata_device *dev, + struct scsi_cmnd *cmd, u16 field) +{ + /* "Invalid field in parameter list" */ + ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0); + scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, + field, 0xff, 0); } static ssize_t @@ -364,10 +405,10 @@ struct device_attribute *ata_common_sdev_attrs[] = { }; EXPORT_SYMBOL_GPL(ata_common_sdev_attrs); -static void ata_scsi_invalid_field(struct scsi_cmnd *cmd) +static void ata_scsi_invalid_field(struct ata_device *dev, + struct scsi_cmnd *cmd, u16 field) { - ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); - /* "Invalid field in cbd" */ + ata_scsi_set_invalid_field(dev, cmd, field, 0xff); cmd->scsi_done(cmd); } @@ -980,6 +1021,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) unsigned char *sb = cmd->sense_buffer; unsigned char *desc = sb + 8; int verbose = qc->ap->ops->error_handler == NULL; + u8 sense_key, asc, ascq; memset(sb, 0, SCSI_SENSE_BUFFERSIZE); @@ -992,47 +1034,71 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) if (qc->err_mask || tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, - &sb[1], &sb[2], &sb[3], verbose); - sb[1] &= 0x0f; + &sense_key, &asc, &ascq, verbose); + ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); } else { - sb[1] = RECOVERED_ERROR; - sb[2] = 0; - sb[3] = 0x1D; + /* + * ATA PASS-THROUGH INFORMATION AVAILABLE + * Always in descriptor format sense. + */ + scsi_build_sense_buffer(1, cmd->sense_buffer, + RECOVERED_ERROR, 0, 0x1D); } - /* - * Sense data is current and format is descriptor. - */ - sb[0] = 0x72; - - desc[0] = 0x09; - - /* set length of additional sense data */ - sb[7] = 14; - desc[1] = 12; - - /* - * Copy registers into sense buffer. 
- */ - desc[2] = 0x00; - desc[3] = tf->feature; /* == error reg */ - desc[5] = tf->nsect; - desc[7] = tf->lbal; - desc[9] = tf->lbam; - desc[11] = tf->lbah; - desc[12] = tf->device; - desc[13] = tf->command; /* == status reg */ + if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) { + u8 len; + + /* descriptor format */ + len = sb[7]; + desc = (char *)scsi_sense_desc_find(sb, len + 8, 9); + if (!desc) { + if (SCSI_SENSE_BUFFERSIZE < len + 14) + return; + sb[7] = len + 14; + desc = sb + 8 + len; + } + desc[0] = 9; + desc[1] = 12; + /* + * Copy registers into sense buffer. + */ + desc[2] = 0x00; + desc[3] = tf->feature; /* == error reg */ + desc[5] = tf->nsect; + desc[7] = tf->lbal; + desc[9] = tf->lbam; + desc[11] = tf->lbah; + desc[12] = tf->device; + desc[13] = tf->command; /* == status reg */ - /* - * Fill in Extend bit, and the high order bytes - * if applicable. - */ - if (tf->flags & ATA_TFLAG_LBA48) { - desc[2] |= 0x01; - desc[4] = tf->hob_nsect; - desc[6] = tf->hob_lbal; - desc[8] = tf->hob_lbam; - desc[10] = tf->hob_lbah; + /* + * Fill in Extend bit, and the high order bytes + * if applicable. + */ + if (tf->flags & ATA_TFLAG_LBA48) { + desc[2] |= 0x01; + desc[4] = tf->hob_nsect; + desc[6] = tf->hob_lbal; + desc[8] = tf->hob_lbam; + desc[10] = tf->hob_lbah; + } + } else { + /* Fixed sense format */ + desc[0] = tf->feature; + desc[1] = tf->command; /* status */ + desc[2] = tf->device; + desc[3] = tf->nsect; + desc[0] = 0; + if (tf->flags & ATA_TFLAG_LBA48) { + desc[8] |= 0x80; + if (tf->hob_nsect) + desc[8] |= 0x40; + if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) + desc[8] |= 0x20; + } + desc[9] = tf->lbal; + desc[10] = tf->lbam; + desc[11] = tf->lbah; } } @@ -1052,41 +1118,41 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) struct scsi_cmnd *cmd = qc->scsicmd; struct ata_taskfile *tf = &qc->result_tf; unsigned char *sb = cmd->sense_buffer; - unsigned char *desc = sb + 8; int verbose = qc->ap->ops->error_handler == NULL; u64 block; + u8 sense_key, asc, ascq; memset(sb, 0, SCSI_SENSE_BUFFERSIZE); cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; - /* sense data is current and format is descriptor */ - sb[0] = 0x72; - + if (ata_dev_disabled(dev)) { + /* Device disabled after error recovery */ + /* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */ + ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21); + return; + } /* Use ata_to_sense_error() to map status register bits * onto sense key, asc & ascq. 
*/ if (qc->err_mask || tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, - &sb[1], &sb[2], &sb[3], verbose); - sb[1] &= 0x0f; + &sense_key, &asc, &ascq, verbose); + ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq); + } else { + /* Could not decode error */ + ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n", + tf->command, qc->err_mask); + ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); + return; } block = ata_tf_read_block(&qc->result_tf, dev); + if (block == U64_MAX) + return; - /* information sense data descriptor */ - sb[7] = 12; - desc[0] = 0x00; - desc[1] = 10; - - desc[2] |= 0x80; /* valid */ - desc[6] = block >> 40; - desc[7] = block >> 32; - desc[8] = block >> 24; - desc[9] = block >> 16; - desc[10] = block >> 8; - desc[11] = block; + scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block); } static void ata_scsi_sdev_config(struct scsi_device *sdev) @@ -1109,7 +1175,7 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) * @rq: request to be checked * * ATAPI commands which transfer variable length data to host - * might overflow due to application error or hardare bug. This + * might overflow due to application error or hardware bug. This * function checks whether overflow should be drained and ignored * for @request. * @@ -1343,19 +1409,29 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) struct scsi_cmnd *scmd = qc->scsicmd; struct ata_taskfile *tf = &qc->tf; const u8 *cdb = scmd->cmnd; + u16 fp; + u8 bp = 0xff; - if (scmd->cmd_len < 5) + if (scmd->cmd_len < 5) { + fp = 4; goto invalid_fld; + } tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; tf->protocol = ATA_PROT_NODATA; if (cdb[1] & 0x1) { ; /* ignore IMMED bit, violates sat-r05 */ } - if (cdb[4] & 0x2) + if (cdb[4] & 0x2) { + fp = 4; + bp = 1; goto invalid_fld; /* LOEJ bit set not supported */ - if (((cdb[4] >> 4) & 0xf) != 0) + } + if (((cdb[4] >> 4) & 0xf) != 0) { + fp = 4; + bp = 3; goto invalid_fld; /* power conditions not supported */ + } if (cdb[4] & 0x1) { tf->nsect = 1; /* 1 sector, lba=0 */ @@ -1401,8 +1477,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) return 0; invalid_fld: - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); - /* "Invalid field in cbd" */ + ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); return 1; skip: scmd->result = SAM_STAT_GOOD; @@ -1553,20 +1628,27 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) const u8 *cdb = scmd->cmnd; u64 block; u32 n_block; + u16 fp; tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf->protocol = ATA_PROT_NODATA; if (cdb[0] == VERIFY) { - if (scmd->cmd_len < 10) + if (scmd->cmd_len < 10) { + fp = 9; goto invalid_fld; + } scsi_10_lba_len(cdb, &block, &n_block); } else if (cdb[0] == VERIFY_16) { - if (scmd->cmd_len < 16) + if (scmd->cmd_len < 16) { + fp = 15; goto invalid_fld; + } scsi_16_lba_len(cdb, &block, &n_block); - } else + } else { + fp = 0; goto invalid_fld; + } if (!n_block) goto nothing_to_do; @@ -1640,12 +1722,11 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) return 0; invalid_fld: - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); - /* "Invalid field in cbd" */ + ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); return 1; out_of_range: - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0); + ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); /* "Logical Block Address out of range" */ return 1; @@ -1680,6 +1761,7 @@ 
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) u64 block; u32 n_block; int rc; + u16 fp = 0; if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16) tf_flags |= ATA_TFLAG_WRITE; @@ -1688,16 +1770,20 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) switch (cdb[0]) { case READ_10: case WRITE_10: - if (unlikely(scmd->cmd_len < 10)) + if (unlikely(scmd->cmd_len < 10)) { + fp = 9; goto invalid_fld; + } scsi_10_lba_len(cdb, &block, &n_block); if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; break; case READ_6: case WRITE_6: - if (unlikely(scmd->cmd_len < 6)) + if (unlikely(scmd->cmd_len < 6)) { + fp = 5; goto invalid_fld; + } scsi_6_lba_len(cdb, &block, &n_block); /* for 6-byte r/w commands, transfer length 0 @@ -1708,14 +1794,17 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) break; case READ_16: case WRITE_16: - if (unlikely(scmd->cmd_len < 16)) + if (unlikely(scmd->cmd_len < 16)) { + fp = 15; goto invalid_fld; + } scsi_16_lba_len(cdb, &block, &n_block); if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; break; default: DPRINTK("no-byte command\n"); + fp = 0; goto invalid_fld; } @@ -1742,12 +1831,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) goto out_of_range; /* treat all other errors as -EINVAL, fall through */ invalid_fld: - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); - /* "Invalid field in cbd" */ + ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); return 1; out_of_range: - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0); + ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); /* "Logical Block Address out of range" */ return 1; @@ -1784,6 +1872,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && ((cdb[2] & 0x20) || need_sense)) ata_gen_passthru_sense(qc); + else if (qc->flags & ATA_QCFLAG_SENSE_VALID) + cmd->result = SAM_STAT_CHECK_CONDITION; else if (need_sense) ata_gen_ata_sense(qc); else @@ -1992,14 +2082,14 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 0x00, 0xA0, /* SAM-5 (no version claimed) */ - 0x04, - 0xC0, /* SBC-3 (no version claimed) */ + 0x06, + 0x00, /* SBC-4 (no version claimed) */ - 0x04, - 0x60, /* SPC-4 (no version claimed) */ + 0x05, + 0xC0, /* SPC-5 (no version claimed) */ 0x60, - 0x20, /* ZBC (no version claimed) */ + 0x24, /* ZBC r05 */ }; u8 hdr[] = { @@ -2019,10 +2109,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) hdr[1] |= (1 << 7); - if (args->dev->class == ATA_DEV_ZAC) { + if (args->dev->class == ATA_DEV_ZAC) hdr[0] = TYPE_ZBC; - hdr[2] = 0x6; /* ZBC is defined in SPC-4 */ - } memcpy(rbuf, hdr, sizeof(hdr)); memcpy(&rbuf[8], "ATA ", 8); @@ -2036,7 +2124,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) if (rbuf[32] == 0 || rbuf[32] == ' ') memcpy(&rbuf[32], "n/a ", 4); - if (args->dev->class == ATA_DEV_ZAC) + if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC) memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc)); else memcpy(rbuf + 58, versions, sizeof(versions)); @@ -2056,6 +2144,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) */ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf) { + int num_pages; const u8 pages[] = { 0x00, /* page 0x00, this page */ 0x80, /* page 0x80, unit serial no page */ @@ -2064,10 +2153,14 @@ static unsigned int ata_scsiop_inq_00(struct 
ata_scsi_args *args, u8 *rbuf) 0xb0, /* page 0xb0, block limits page */ 0xb1, /* page 0xb1, block device characteristics page */ 0xb2, /* page 0xb2, thin provisioning page */ + 0xb6, /* page 0xb6, zoned block device characteristics */ }; - rbuf[3] = sizeof(pages); /* number of supported VPD pages */ - memcpy(rbuf + 4, pages, sizeof(pages)); + num_pages = sizeof(pages); + if (!(args->dev->flags & ATA_DFLAG_ZAC)) + num_pages--; + rbuf[3] = num_pages; /* number of supported VPD pages */ + memcpy(rbuf + 4, pages, num_pages); return 0; } @@ -2232,12 +2325,15 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf) { int form_factor = ata_id_form_factor(args->id); int media_rotation_rate = ata_id_rotation_rate(args->id); + u8 zoned = ata_id_zoned_cap(args->id); rbuf[1] = 0xb1; rbuf[3] = 0x3c; rbuf[4] = media_rotation_rate >> 8; rbuf[5] = media_rotation_rate; rbuf[7] = form_factor; + if (zoned) + rbuf[8] = (zoned << 4); return 0; } @@ -2252,6 +2348,26 @@ static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf) return 0; } +static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf) +{ + /* + * zbc-r05 SCSI Zoned Block device characteristics VPD page + */ + rbuf[1] = 0xb6; + rbuf[3] = 0x3C; + + /* + * URSWRZ bit is only meaningful for host-managed ZAC drives + */ + if (args->dev->zac_zoned_cap & 1) + rbuf[4] |= 1; + put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]); + put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]); + put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]); + + return 0; +} + /** * ata_scsiop_noop - Command handler that simply returns success. * @args: device IDENTIFY data / SCSI command of interest. @@ -2317,6 +2433,7 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable) /** * ata_msense_ctl_mode - Simulate MODE SENSE control mode page + * @dev: ATA device of interest * @buf: output buffer * @changeable: whether changeable parameters are requested * @@ -2325,9 +2442,12 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable) * LOCKING: * None. 
*/ -static unsigned int ata_msense_ctl_mode(u8 *buf, bool changeable) +static unsigned int ata_msense_ctl_mode(struct ata_device *dev, u8 *buf, + bool changeable) { modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable); + if (changeable && (dev->flags & ATA_DFLAG_D_SENSE)) + buf[2] |= (1 << 2); /* Descriptor sense requested */ return sizeof(def_control_mpage); } @@ -2395,7 +2515,8 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) }; u8 pg, spg; unsigned int ebd, page_control, six_byte; - u8 dpofua; + u8 dpofua, bp = 0xff; + u16 fp; VPRINTK("ENTER\n"); @@ -2414,6 +2535,8 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) case 3: /* saved */ goto saving_not_supp; default: + fp = 2; + bp = 6; goto invalid_fld; } @@ -2428,8 +2551,10 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) * No mode subpages supported (yet) but asking for _all_ * subpages may be valid */ - if (spg && (spg != ALL_SUB_MPAGES)) + if (spg && (spg != ALL_SUB_MPAGES)) { + fp = 3; goto invalid_fld; + } switch(pg) { case RW_RECOVERY_MPAGE: @@ -2441,16 +2566,17 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) break; case CONTROL_MPAGE: - p += ata_msense_ctl_mode(p, page_control == 1); + p += ata_msense_ctl_mode(args->dev, p, page_control == 1); break; case ALL_MPAGES: p += ata_msense_rw_recovery(p, page_control == 1); p += ata_msense_caching(args->id, p, page_control == 1); - p += ata_msense_ctl_mode(p, page_control == 1); + p += ata_msense_ctl_mode(args->dev, p, page_control == 1); break; default: /* invalid page code */ + fp = 2; goto invalid_fld; } @@ -2480,12 +2606,11 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) return 0; invalid_fld: - ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0); - /* "Invalid field in cbd" */ + ata_scsi_set_invalid_field(dev, args->cmd, fp, bp); return 1; saving_not_supp: - ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); + ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); /* "Saving parameters not supported" */ return 1; } @@ -2561,6 +2686,9 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) rbuf[14] |= 0x40; /* LBPRZ */ } } + if (ata_id_zoned_cap(args->id) || + args->dev->class == ATA_DEV_ZAC) + rbuf[12] = (1 << 4); /* RC_BASIS */ } return 0; } @@ -2942,9 +3070,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) struct scsi_cmnd *scmd = qc->scsicmd; struct ata_device *dev = qc->dev; const u8 *cdb = scmd->cmnd; + u16 fp; - if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) + if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) { + fp = 1; goto invalid_fld; + } /* enable LBA */ tf->flags |= ATA_TFLAG_LBA; @@ -3008,8 +3139,10 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) case ATA_CMD_READ_LONG_ONCE: case ATA_CMD_WRITE_LONG: case ATA_CMD_WRITE_LONG_ONCE: - if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) + if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) { + fp = 1; goto invalid_fld; + } qc->sect_size = scsi_bufflen(scmd); break; @@ -3072,12 +3205,16 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) ata_qc_set_pc_nbytes(qc); /* We may not issue DMA commands if no DMA mode is set */ - if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) + if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) { + fp = 1; goto invalid_fld; + } /* sanity check for pio 
multi commands */ - if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) + if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { + fp = 1; goto invalid_fld; + } if (is_multi_taskfile(tf)) { unsigned int multi_count = 1 << (cdb[1] >> 5); @@ -3098,8 +3235,10 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * ->set_dmamode(), and ->post_set_mode() hooks). */ if (tf->command == ATA_CMD_SET_FEATURES && - tf->feature == SETFEATURES_XFER) + tf->feature == SETFEATURES_XFER) { + fp = (cdb[0] == ATA_16) ? 4 : 3; goto invalid_fld; + } /* * Filter TPM commands by default. These provide an @@ -3116,14 +3255,15 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * so that we comply with the TC consortium stated goal that the user * can turn off TC features of their system. */ - if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) + if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) { + fp = (cdb[0] == ATA_16) ? 14 : 9; goto invalid_fld; + } return 0; invalid_fld: - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00); - /* "Invalid field in cdb" */ + ata_scsi_set_invalid_field(dev, scmd, fp, 0xff); return 1; } @@ -3137,25 +3277,32 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) u32 n_block; u32 size; void *buf; + u16 fp; + u8 bp = 0xff; /* we may not issue DMA commands if no DMA mode is set */ if (unlikely(!dev->dma_mode)) - goto invalid_fld; + goto invalid_opcode; - if (unlikely(scmd->cmd_len < 16)) + if (unlikely(scmd->cmd_len < 16)) { + fp = 15; goto invalid_fld; + } scsi_16_lba_len(cdb, &block, &n_block); /* for now we only support WRITE SAME with the unmap bit set */ - if (unlikely(!(cdb[1] & 0x8))) + if (unlikely(!(cdb[1] & 0x8))) { + fp = 1; + bp = 3; goto invalid_fld; + } /* * WRITE SAME always has a sector sized buffer as payload, this * should never be a multiple entry S/G list. */ if (!scsi_sg_count(scmd)) - goto invalid_fld; + goto invalid_param_len; buf = page_address(sg_page(scsi_sglist(scmd))); size = ata_set_lba_range_entries(buf, 512, block, n_block); @@ -3186,9 +3333,242 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) return 0; +invalid_fld: + ata_scsi_set_invalid_field(dev, scmd, fp, bp); + return 1; +invalid_param_len: + /* "Parameter list length error" */ + ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); + return 1; +invalid_opcode: + /* "Invalid command operation code" */ + ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0); + return 1; +} + +/** + * ata_scsi_report_zones_complete - convert ATA output + * @qc: command structure returning the data + * + * Convert T-13 little-endian field representation into + * T-10 big-endian field representation. + * What a mess. 
+ */ +static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc) +{ + struct scsi_cmnd *scmd = qc->scsicmd; + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int bytes = 0; + + sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd), + SG_MITER_TO_SG | SG_MITER_ATOMIC); + + local_irq_save(flags); + while (sg_miter_next(&miter)) { + unsigned int offset = 0; + + if (bytes == 0) { + char *hdr; + u32 list_length; + u64 max_lba, opt_lba; + u16 same; + + /* Swizzle header */ + hdr = miter.addr; + list_length = get_unaligned_le32(&hdr[0]); + same = get_unaligned_le16(&hdr[4]); + max_lba = get_unaligned_le64(&hdr[8]); + opt_lba = get_unaligned_le64(&hdr[16]); + put_unaligned_be32(list_length, &hdr[0]); + hdr[4] = same & 0xf; + put_unaligned_be64(max_lba, &hdr[8]); + put_unaligned_be64(opt_lba, &hdr[16]); + offset += 64; + bytes += 64; + } + while (offset < miter.length) { + char *rec; + u8 cond, type, non_seq, reset; + u64 size, start, wp; + + /* Swizzle zone descriptor */ + rec = miter.addr + offset; + type = rec[0] & 0xf; + cond = (rec[1] >> 4) & 0xf; + non_seq = (rec[1] & 2); + reset = (rec[1] & 1); + size = get_unaligned_le64(&rec[8]); + start = get_unaligned_le64(&rec[16]); + wp = get_unaligned_le64(&rec[24]); + rec[0] = type; + rec[1] = (cond << 4) | non_seq | reset; + put_unaligned_be64(size, &rec[8]); + put_unaligned_be64(start, &rec[16]); + put_unaligned_be64(wp, &rec[24]); + WARN_ON(offset + 64 > miter.length); + offset += 64; + bytes += 64; + } + } + sg_miter_stop(&miter); + local_irq_restore(flags); + + ata_scsi_qc_complete(qc); +} + +static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc) +{ + struct ata_taskfile *tf = &qc->tf; + struct scsi_cmnd *scmd = qc->scsicmd; + const u8 *cdb = scmd->cmnd; + u16 sect, fp = (u16)-1; + u8 sa, options, bp = 0xff; + u64 block; + u32 n_block; + + if (unlikely(scmd->cmd_len < 16)) { + ata_dev_warn(qc->dev, "invalid cdb length %d\n", + scmd->cmd_len); + fp = 15; + goto invalid_fld; + } + scsi_16_lba_len(cdb, &block, &n_block); + if (n_block != scsi_bufflen(scmd)) { + ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n", + n_block, scsi_bufflen(scmd)); + goto invalid_param_len; + } + sa = cdb[1] & 0x1f; + if (sa != ZI_REPORT_ZONES) { + ata_dev_warn(qc->dev, "invalid service action %d\n", sa); + fp = 1; + goto invalid_fld; + } + /* + * ZAC allows only for transfers in 512 byte blocks, + * and uses a 16 bit value for the transfer count. 
+ */ + if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) { + ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block); + goto invalid_param_len; + } + sect = n_block / 512; + options = cdb[14]; + + if (ata_ncq_enabled(qc->dev) && + ata_fpdma_zac_mgmt_in_supported(qc->dev)) { + tf->protocol = ATA_PROT_NCQ; + tf->command = ATA_CMD_FPDMA_RECV; + tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; + tf->nsect = qc->tag << 3; + tf->feature = sect & 0xff; + tf->hob_feature = (sect >> 8) & 0xff; + tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; + } else { + tf->command = ATA_CMD_ZAC_MGMT_IN; + tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; + tf->protocol = ATA_PROT_DMA; + tf->hob_feature = options; + tf->hob_nsect = (sect >> 8) & 0xff; + tf->nsect = sect & 0xff; + } + tf->device = ATA_LBA; + tf->lbah = (block >> 16) & 0xff; + tf->lbam = (block >> 8) & 0xff; + tf->lbal = block & 0xff; + tf->hob_lbah = (block >> 40) & 0xff; + tf->hob_lbam = (block >> 32) & 0xff; + tf->hob_lbal = (block >> 24) & 0xff; + + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; + qc->flags |= ATA_QCFLAG_RESULT_TF; + + ata_qc_set_pc_nbytes(qc); + + qc->complete_fn = ata_scsi_report_zones_complete; + + return 0; + +invalid_fld: + ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); + return 1; + +invalid_param_len: + /* "Parameter list length error" */ + ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); + return 1; +} + +static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) +{ + struct ata_taskfile *tf = &qc->tf; + struct scsi_cmnd *scmd = qc->scsicmd; + struct ata_device *dev = qc->dev; + const u8 *cdb = scmd->cmnd; + u8 reset_all, sa; + u64 block; + u32 n_block; + u16 fp = (u16)-1; + + if (unlikely(scmd->cmd_len < 16)) { + fp = 15; + goto invalid_fld; + } + + sa = cdb[1] & 0x1f; + if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) && + (sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) { + fp = 1; + goto invalid_fld; + } + + scsi_16_lba_len(cdb, &block, &n_block); + if (n_block) { + /* + * ZAC MANAGEMENT OUT doesn't define any length + */ + goto invalid_param_len; + } + if (block > dev->n_sectors) + goto out_of_range; + + reset_all = cdb[14] & 0x1; + + if (ata_ncq_enabled(qc->dev) && + ata_fpdma_zac_mgmt_out_supported(qc->dev)) { + tf->protocol = ATA_PROT_NCQ; + tf->command = ATA_CMD_NCQ_NON_DATA; + tf->hob_nsect = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; + tf->nsect = qc->tag << 3; + tf->auxiliary = sa | (reset_all & 0x1) << 8; + } else { + tf->protocol = ATA_PROT_NODATA; + tf->command = ATA_CMD_ZAC_MGMT_OUT; + tf->feature = sa; + tf->hob_feature = reset_all & 0x1; + } + tf->lbah = (block >> 16) & 0xff; + tf->lbam = (block >> 8) & 0xff; + tf->lbal = block & 0xff; + tf->hob_lbah = (block >> 40) & 0xff; + tf->hob_lbam = (block >> 32) & 0xff; + tf->hob_lbal = (block >> 24) & 0xff; + tf->device = ATA_LBA; + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; + + return 0; + invalid_fld: - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00); - /* "Invalid field in cdb" */ + ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); + return 1; + out_of_range: + /* "Logical Block Address out of range" */ + ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00); + return 1; +invalid_param_len: + /* "Parameter list length error" */ + ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); return 1; } @@ -3197,6 +3577,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) * @qc: Storage for translated 
ATA taskfile * @buf: input buffer * @len: number of valid bytes in the input buffer + * @fp: out parameter for the failed field on error * * Prepare a taskfile to modify caching information for the device. * @@ -3204,20 +3585,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) * None. */ static int ata_mselect_caching(struct ata_queued_cmd *qc, - const u8 *buf, int len) + const u8 *buf, int len, u16 *fp) { struct ata_taskfile *tf = &qc->tf; struct ata_device *dev = qc->dev; char mpage[CACHE_MPAGE_LEN]; u8 wce; + int i; /* * The first two bytes of def_cache_mpage are a header, so offsets * in mpage are off by 2 compared to buf. Same for len. */ - if (len != CACHE_MPAGE_LEN - 2) + if (len != CACHE_MPAGE_LEN - 2) { + if (len < CACHE_MPAGE_LEN - 2) + *fp = len; + else + *fp = CACHE_MPAGE_LEN - 2; return -EINVAL; + } wce = buf[0] & (1 << 2); @@ -3225,10 +3612,14 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc, * Check that read-only bits are not modified. */ ata_msense_caching(dev->id, mpage, false); - mpage[2] &= ~(1 << 2); - mpage[2] |= wce; - if (memcmp(mpage + 2, buf, CACHE_MPAGE_LEN - 2) != 0) - return -EINVAL; + for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) { + if (i == 0) + continue; + if (mpage[i + 2] != buf[i]) { + *fp = i; + return -EINVAL; + } + } tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; tf->protocol = ATA_PROT_NODATA; @@ -3239,6 +3630,62 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc, } /** + * ata_mselect_control - Simulate MODE SELECT for control page + * @qc: Storage for translated ATA taskfile + * @buf: input buffer + * @len: number of valid bytes in the input buffer + * @fp: out parameter for the failed field on error + * + * Prepare a taskfile to modify caching information for the device. + * + * LOCKING: + * None. + */ +static int ata_mselect_control(struct ata_queued_cmd *qc, + const u8 *buf, int len, u16 *fp) +{ + struct ata_device *dev = qc->dev; + char mpage[CONTROL_MPAGE_LEN]; + u8 d_sense; + int i; + + /* + * The first two bytes of def_control_mpage are a header, so offsets + * in mpage are off by 2 compared to buf. Same for len. + */ + + if (len != CONTROL_MPAGE_LEN - 2) { + if (len < CONTROL_MPAGE_LEN - 2) + *fp = len; + else + *fp = CONTROL_MPAGE_LEN - 2; + return -EINVAL; + } + + d_sense = buf[0] & (1 << 2); + + /* + * Check that read-only bits are not modified. + */ + ata_msense_ctl_mode(dev, mpage, false); + for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { + if (i == 0) + continue; + if (mpage[2 + i] != buf[i]) { + *fp = i; + return -EINVAL; + } + } + if (d_sense & (1 << 2)) + dev->flags |= ATA_DFLAG_D_SENSE; + else + dev->flags &= ~ATA_DFLAG_D_SENSE; + qc->scsicmd->result = SAM_STAT_GOOD; + qc->scsicmd->scsi_done(qc->scsicmd); + return 0; +} + +/** * ata_scsiop_mode_select - Simulate MODE SELECT 6, 10 commands * @qc: Storage for translated ATA taskfile * @@ -3257,27 +3704,36 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) u8 pg, spg; unsigned six_byte, pg_len, hdr_len, bd_len; int len; + u16 fp = (u16)-1; + u8 bp = 0xff; VPRINTK("ENTER\n"); six_byte = (cdb[0] == MODE_SELECT); if (six_byte) { - if (scmd->cmd_len < 5) + if (scmd->cmd_len < 5) { + fp = 4; goto invalid_fld; + } len = cdb[4]; hdr_len = 4; } else { - if (scmd->cmd_len < 9) + if (scmd->cmd_len < 9) { + fp = 8; goto invalid_fld; + } len = (cdb[7] << 8) + cdb[8]; hdr_len = 8; } /* We only support PF=1, SP=0. */ - if ((cdb[1] & 0x11) != 0x10) + if ((cdb[1] & 0x11) != 0x10) { + fp = 1; + bp = (cdb[1] & 0x01) ? 
1 : 5; goto invalid_fld; + } /* Test early for possible overrun. */ if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) @@ -3298,8 +3754,11 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) p += hdr_len; if (len < bd_len) goto invalid_param_len; - if (bd_len != 0 && bd_len != 8) + if (bd_len != 0 && bd_len != 8) { + fp = (six_byte) ? 3 : 6; + fp += bd_len + hdr_len; goto invalid_param; + } len -= bd_len; p += bd_len; @@ -3330,18 +3789,29 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) * No mode subpages supported (yet) but asking for _all_ * subpages may be valid */ - if (spg && (spg != ALL_SUB_MPAGES)) + if (spg && (spg != ALL_SUB_MPAGES)) { + fp = (p[0] & 0x40) ? 1 : 0; + fp += hdr_len + bd_len; goto invalid_param; + } if (pg_len > len) goto invalid_param_len; switch (pg) { case CACHE_MPAGE: - if (ata_mselect_caching(qc, p, pg_len) < 0) + if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) { + fp += hdr_len + bd_len; goto invalid_param; + } + break; + case CONTROL_MPAGE: + if (ata_mselect_control(qc, p, pg_len, &fp) < 0) { + fp += hdr_len + bd_len; + goto invalid_param; + } break; - default: /* invalid page code */ + fp = bd_len + hdr_len; goto invalid_param; } @@ -3355,18 +3825,16 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) return 0; invalid_fld: - /* "Invalid field in CDB" */ - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); + ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); return 1; invalid_param: - /* "Invalid field in parameter list" */ - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x26, 0x0); + ata_scsi_set_invalid_parameter(qc->dev, scmd, fp); return 1; invalid_param_len: /* "Parameter list length error" */ - ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x1a, 0x0); + ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); return 1; skip: @@ -3419,6 +3887,12 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) return ata_scsi_mode_select_xlat; break; + case ZBC_IN: + return ata_scsi_zbc_in_xlat; + + case ZBC_OUT: + return ata_scsi_zbc_out_xlat; + case START_STOP: return ata_scsi_start_stop_xlat; } @@ -3439,14 +3913,11 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, { #ifdef ATA_DEBUG struct scsi_device *scsidev = cmd->device; - u8 *scsicmd = cmd->cmnd; - DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + DPRINTK("CDB (%u:%d,%d,%d) %9ph\n", ap->print_id, scsidev->channel, scsidev->id, scsidev->lun, - scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], - scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], - scsicmd[8]); + cmd->cmnd); #endif } @@ -3570,12 +4041,12 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) switch(scsicmd[0]) { /* TODO: worth improving? */ case FORMAT_UNIT: - ata_scsi_invalid_field(cmd); + ata_scsi_invalid_field(dev, cmd, 0); break; case INQUIRY: - if (scsicmd[1] & 2) /* is CmdDt set? */ - ata_scsi_invalid_field(cmd); + if (scsicmd[1] & 2) /* is CmdDt set? */ + ata_scsi_invalid_field(dev, cmd, 1); else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); else switch (scsicmd[2]) { @@ -3600,8 +4071,14 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) case 0xb2: ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2); break; + case 0xb6: + if (dev->flags & ATA_DFLAG_ZAC) { + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6); + break; + } + /* Fallthrough */ default: - ata_scsi_invalid_field(cmd); + ata_scsi_invalid_field(dev, cmd, 2); break; } break; @@ -3619,7 +4096,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); else - ata_scsi_invalid_field(cmd); + ata_scsi_invalid_field(dev, cmd, 1); break; case REPORT_LUNS: @@ -3627,7 +4104,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) break; case REQUEST_SENSE: - ata_scsi_set_sense(cmd, 0, 0, 0); + ata_scsi_set_sense(dev, cmd, 0, 0, 0); cmd->result = (DRIVER_SENSE << 24); cmd->scsi_done(cmd); break; @@ -3651,12 +4128,12 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) ata_scsi_rbuf_fill(&args, ata_scsiop_noop); else - ata_scsi_invalid_field(cmd); + ata_scsi_invalid_field(dev, cmd, 1); break; /* all other commands */ default: - ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); + ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0); /* "Invalid command operation code" */ cmd->scsi_done(cmd); break; diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c index fd30b8c10..f8c550df0 100644 --- a/drivers/ata/libata-trace.c +++ b/drivers/ata/libata-trace.c @@ -149,3 +149,75 @@ libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags) return ret; } + +const char * +libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd, + unsigned char feature, unsigned char hob_nsect) +{ + const char *ret = trace_seq_buffer_ptr(p); + + switch (cmd) { + case ATA_CMD_FPDMA_RECV: + switch (hob_nsect & 0x5f) { + case ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT: + trace_seq_printf(p, " READ_LOG_DMA_EXT"); + break; + case ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN: + trace_seq_printf(p, " ZAC_MGMT_IN"); + break; + } + break; + case ATA_CMD_FPDMA_SEND: + switch (hob_nsect & 0x5f) { + case ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT: + trace_seq_printf(p, " WRITE_LOG_DMA_EXT"); + break; + case ATA_SUBCMD_FPDMA_SEND_DSM: + trace_seq_printf(p, " DATASET_MANAGEMENT"); + break; + } + break; + case ATA_CMD_NCQ_NON_DATA: + switch (feature) { + case ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE: + trace_seq_printf(p, " ABORT_QUEUE"); + break; + case ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES: + trace_seq_printf(p, " SET_FEATURES"); + break; + case ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT: + trace_seq_printf(p, " ZERO_EXT"); + break; + case ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT: + trace_seq_printf(p, " ZAC_MGMT_OUT"); + break; + } + break; + case ATA_CMD_ZAC_MGMT_IN: + switch (feature) { + case ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES: + trace_seq_printf(p, " REPORT_ZONES"); + break; + } + break; + case ATA_CMD_ZAC_MGMT_OUT: + switch (feature) { + case ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE: + trace_seq_printf(p, " CLOSE_ZONE"); + break; + case ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE: + trace_seq_printf(p, " FINISH_ZONE"); + break; + case ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE: + trace_seq_printf(p, " OPEN_ZONE"); + break; + case ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER: + trace_seq_printf(p, " RESET_WRITE_POINTER"); + break; + } + break; + } + trace_seq_putc(p, 0); + + return 
ret; +} diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index f840ca18a..3b301a480 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -67,7 +67,8 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag); extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, u64 block, u32 n_block, unsigned int tf_flags, unsigned int tag); -extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); +extern u64 ata_tf_read_block(const struct ata_taskfile *tf, + struct ata_device *dev); extern unsigned ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf, const u8 *cdb, int dma_dir, void *buf, unsigned int buflen, @@ -137,6 +138,11 @@ extern int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht); extern void ata_scsi_scan_host(struct ata_port *ap, int sync); extern int ata_scsi_offline_dev(struct ata_device *dev); +extern void ata_scsi_set_sense(struct ata_device *dev, + struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq); +extern void ata_scsi_set_sense_information(struct ata_device *dev, + struct scsi_cmnd *cmd, + const struct ata_taskfile *tf); extern void ata_scsi_media_change_notify(struct ata_device *dev); extern void ata_scsi_hotplug(struct work_struct *work); extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index d7c732042..188f2f2eb 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -294,7 +294,7 @@ static int icside_dma_init(struct pata_icside_info *info) static struct scsi_host_template pata_icside_sht = { ATA_BASE_SHT(DRV_NAME), - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, + .sg_tablesize = SG_MAX_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, }; diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 902034991..00c2af1d2 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -30,10 +30,12 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/dmaengine.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/phy/phy.h> #include <linux/libata.h> #include <linux/slab.h> @@ -42,10 +44,6 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> -/* Supported DMA engine drivers */ -#include <linux/platform_data/dma-dw.h> -#include <linux/dma/dw.h> - /* These two are defined in "libata.h" */ #undef DRV_NAME #undef DRV_VERSION @@ -53,19 +51,14 @@ #define DRV_NAME "sata-dwc" #define DRV_VERSION "1.3" -#ifndef out_le32 -#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (void __iomem *)(a)) -#endif - -#ifndef in_le32 -#define in_le32(a) __le32_to_cpu(__raw_readl((void __iomem *)(a))) -#endif +#define sata_dwc_writel(a, v) writel_relaxed(v, a) +#define sata_dwc_readl(a) readl_relaxed(a) #ifndef NO_IRQ #define NO_IRQ 0 #endif -#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/ +#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */ enum { SATA_DWC_MAX_PORTS = 1, @@ -102,7 +95,7 @@ struct sata_dwc_regs { u32 versionr; /* Version Register */ u32 idr; /* ID Register */ u32 unimpl[192]; /* Unimplemented */ - u32 dmadr[256]; /* FIFO Locations in DMA Mode */ + u32 dmadr[256]; /* FIFO Locations in DMA Mode */ }; enum { @@ -146,9 +139,14 @@ struct sata_dwc_device { struct device *dev; /* generic device struct */ struct ata_probe_ent *pe; /* ptr to probe-ent */ struct ata_host *host; - u8 __iomem *reg_base; 
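Aside on the accessor conversion shown just above: the driver drops the powerpc-only out_le32()/in_le32() helpers in favour of the sata_dwc_writel()/sata_dwc_readl() macros, which wrap the portable writel_relaxed()/readl_relaxed() MMIO accessors. A minimal sketch of the same idiom follows; the demo_* names are hypothetical and not taken from the driver, and whether relaxed (barrier-free) accessors are sufficient depends on how the caller orders MMIO against DMA.

#include <linux/io.h>

/* Little-endian 32-bit register access without the ordering barriers
 * implied by writel()/readl(); mirrors the sata_dwc_writel()/
 * sata_dwc_readl() macros above. */
static inline void demo_writel(void __iomem *reg, u32 val)
{
	writel_relaxed(val, reg);
}

static inline u32 demo_readl(void __iomem *reg)
{
	return readl_relaxed(reg);
}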
- struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ + struct sata_dwc_regs __iomem *sata_dwc_regs; /* DW SATA specific */ + u32 sactive_issued; + u32 sactive_queued; + struct phy *phy; + phys_addr_t dmadr; +#ifdef CONFIG_SATA_DWC_OLD_DMA struct dw_dma_chip *dma; +#endif }; #define SATA_DWC_QCMD_MAX 32 @@ -159,25 +157,19 @@ struct sata_dwc_device_port { int dma_pending[SATA_DWC_QCMD_MAX]; /* DMA info */ - struct dw_dma_slave *dws; struct dma_chan *chan; struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX]; u32 dma_interrupt_count; }; /* - * Commonly used DWC SATA driver Macros + * Commonly used DWC SATA driver macros */ -#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\ - (host)->private_data) -#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\ - (ap)->host->private_data) -#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\ - (ap)->private_data) -#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\ - (qc)->ap->host->private_data) -#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\ - (hsdevp)->hsdev) +#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)(host)->private_data) +#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)(ap)->host->private_data) +#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)(ap)->private_data) +#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)(qc)->ap->host->private_data) +#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)(p)->hsdev) enum { SATA_DWC_CMD_ISSUED_NOT = 0, @@ -190,21 +182,6 @@ enum { SATA_DWC_DMA_PENDING_RX = 2, }; -struct sata_dwc_host_priv { - void __iomem *scr_addr_sstatus; - u32 sata_dwc_sactive_issued ; - u32 sata_dwc_sactive_queued ; -}; - -static struct sata_dwc_host_priv host_pvt; - -static struct dw_dma_slave sata_dwc_dma_dws = { - .src_id = 0, - .dst_id = 0, - .src_master = 0, - .dst_master = 1, -}; - /* * Prototypes */ @@ -215,6 +192,93 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); static void sata_dwc_port_stop(struct ata_port *ap); static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); +#ifdef CONFIG_SATA_DWC_OLD_DMA + +#include <linux/platform_data/dma-dw.h> +#include <linux/dma/dw.h> + +static struct dw_dma_slave sata_dwc_dma_dws = { + .src_id = 0, + .dst_id = 0, + .m_master = 1, + .p_master = 0, +}; + +static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_slave *dws = &sata_dwc_dma_dws; + + if (dws->dma_dev != chan->device->dev) + return false; + + chan->private = dws; + return true; +} + +static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp) +{ + struct sata_dwc_device *hsdev = hsdevp->hsdev; + struct dw_dma_slave *dws = &sata_dwc_dma_dws; + dma_cap_mask_t mask; + + dws->dma_dev = hsdev->dev; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* Acquire DMA channel */ + hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp); + if (!hsdevp->chan) { + dev_err(hsdev->dev, "%s: dma channel unavailable\n", + __func__); + return -EAGAIN; + } + + return 0; +} + +static int sata_dwc_dma_init_old(struct platform_device *pdev, + struct sata_dwc_device *hsdev) +{ + struct device_node *np = pdev->dev.of_node; + struct resource *res; + + hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL); + if (!hsdev->dma) + return -ENOMEM; + + hsdev->dma->dev = &pdev->dev; + + /* Get SATA DMA interrupt number */ + hsdev->dma->irq = irq_of_parse_and_map(np, 1); + if (hsdev->dma->irq == NO_IRQ) { + dev_err(&pdev->dev, "no SATA DMA 
irq\n"); + return -ENODEV; + } + + /* Get physical SATA DMA register base address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(hsdev->dma->regs)) { + dev_err(&pdev->dev, + "ioremap failed for AHBDMA register address\n"); + return PTR_ERR(hsdev->dma->regs); + } + + /* Initialize AHB DMAC */ + return dw_dma_probe(hsdev->dma); +} + +static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev) +{ + if (!hsdev->dma) + return; + + dw_dma_remove(hsdev->dma); +} + +#endif + static const char *get_prot_descript(u8 protocol) { switch ((enum ata_tf_protocols)protocol) { @@ -305,21 +369,20 @@ static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd struct ata_port *ap = qc->ap; struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); - dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr; struct dma_slave_config sconf; struct dma_async_tx_descriptor *desc; if (qc->dma_dir == DMA_DEV_TO_MEM) { - sconf.src_addr = addr; - sconf.device_fc = true; + sconf.src_addr = hsdev->dmadr; + sconf.device_fc = false; } else { /* DMA_MEM_TO_DEV */ - sconf.dst_addr = addr; + sconf.dst_addr = hsdev->dmadr; sconf.device_fc = false; } sconf.direction = qc->dma_dir; - sconf.src_maxburst = AHB_DMA_BRST_DFLT; - sconf.dst_maxburst = AHB_DMA_BRST_DFLT; + sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */ + sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */ sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -336,8 +399,8 @@ static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd desc->callback = dma_dwc_xfer_done; desc->callback_param = hsdev; - dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pad\n", - __func__, qc->sg, qc->n_elem, &addr); + dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__, + qc->sg, qc->n_elem, &hsdev->dmadr); return desc; } @@ -350,48 +413,38 @@ static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) return -EINVAL; } - *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4)); - dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", - __func__, link->ap->print_id, scr, *val); + *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4)); + dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__, + link->ap->print_id, scr, *val); return 0; } static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) { - dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", - __func__, link->ap->print_id, scr, val); + dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__, + link->ap->print_id, scr, val); if (scr > SCR_NOTIFICATION) { dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", __func__, scr); return -EINVAL; } - out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val); + sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val); return 0; } -static u32 core_scr_read(unsigned int scr) -{ - return in_le32(host_pvt.scr_addr_sstatus + (scr * 4)); -} - -static void core_scr_write(unsigned int scr, u32 val) -{ - out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val); -} - -static void clear_serror(void) +static void clear_serror(struct ata_port *ap) { u32 val; - val = core_scr_read(SCR_ERROR); - core_scr_write(SCR_ERROR, val); + sata_dwc_scr_read(&ap->link, SCR_ERROR, &val); + sata_dwc_scr_write(&ap->link, SCR_ERROR, val); } static void clear_interrupt_bit(struct 
sata_dwc_device *hsdev, u32 bit) { - out_le32(&hsdev->sata_dwc_regs->intpr, - in_le32(&hsdev->sata_dwc_regs->intpr)); + sata_dwc_writel(&hsdev->sata_dwc_regs->intpr, + sata_dwc_readl(&hsdev->sata_dwc_regs->intpr)); } static u32 qcmd_tag_to_mask(u8 tag) @@ -412,7 +465,7 @@ static void sata_dwc_error_intr(struct ata_port *ap, ata_ehi_clear_desc(ehi); - serror = core_scr_read(SCR_ERROR); + sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror); status = ap->ops->sff_check_status(ap); tag = ap->link.active_tag; @@ -423,7 +476,7 @@ static void sata_dwc_error_intr(struct ata_port *ap, hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]); /* Clear error register and interrupt bit */ - clear_serror(); + clear_serror(ap); clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR); /* This is the only error happening now. TODO check for exact error */ @@ -462,12 +515,12 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) int handled, num_processed, port = 0; uint intpr, sactive, sactive2, tag_mask; struct sata_dwc_device_port *hsdevp; - host_pvt.sata_dwc_sactive_issued = 0; + hsdev->sactive_issued = 0; spin_lock_irqsave(&host->lock, flags); /* Read the interrupt register */ - intpr = in_le32(&hsdev->sata_dwc_regs->intpr); + intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr); ap = host->ports[port]; hsdevp = HSDEVP_FROM_AP(ap); @@ -486,12 +539,12 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) if (intpr & SATA_DWC_INTPR_NEWFP) { clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP); - tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr)); + tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr)); dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND) dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); - host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag); + hsdev->sactive_issued |= qcmd_tag_to_mask(tag); qc = ata_qc_from_tag(ap, tag); /* @@ -505,11 +558,11 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) handled = 1; goto DONE; } - sactive = core_scr_read(SCR_ACTIVE); - tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive; + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); + tag_mask = (hsdev->sactive_issued | sactive) ^ sactive; /* If no sactive issued and tag_mask is zero then this is not NCQ */ - if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) { + if (hsdev->sactive_issued == 0 && tag_mask == 0) { if (ap->link.active_tag == ATA_TAG_POISON) tag = 0; else @@ -579,22 +632,19 @@ DRVSTILLBUSY: */ /* process completed commands */ - sactive = core_scr_read(SCR_ACTIVE); - tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive; + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); + tag_mask = (hsdev->sactive_issued | sactive) ^ sactive; - if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \ - tag_mask > 1) { + if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) { dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n", - __func__, sactive, host_pvt.sata_dwc_sactive_issued, - tag_mask); + __func__, sactive, hsdev->sactive_issued, tag_mask); } - if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \ - (host_pvt.sata_dwc_sactive_issued)) { + if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) { dev_warn(ap->dev, - "Bad tag mask? sactive=0x%08x (host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask=0x%08x\n", - sactive, host_pvt.sata_dwc_sactive_issued, tag_mask); + "Bad tag mask? 
sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n", + sactive, hsdev->sactive_issued, tag_mask); } /* read just to clear ... not bad if currently still busy */ @@ -656,7 +706,7 @@ STILLBUSY: * we were processing --we read status as part of processing a completed * command). */ - sactive2 = core_scr_read(SCR_ACTIVE); + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2); if (sactive2 != sactive) { dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2=0x%x\n", @@ -672,15 +722,14 @@ DONE: static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) { struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp); + u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr); if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) { - out_le32(&(hsdev->sata_dwc_regs->dmacr), - SATA_DWC_DMACR_RX_CLEAR( - in_le32(&(hsdev->sata_dwc_regs->dmacr)))); + dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr); + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr); } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) { - out_le32(&(hsdev->sata_dwc_regs->dmacr), - SATA_DWC_DMACR_TX_CLEAR( - in_le32(&(hsdev->sata_dwc_regs->dmacr)))); + dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr); + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr); } else { /* * This should not happen, it indicates the driver is out of @@ -688,10 +737,9 @@ static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) */ dev_err(hsdev->dev, "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n", - __func__, tag, hsdevp->dma_pending[tag], - in_le32(&hsdev->sata_dwc_regs->dmacr)); - out_le32(&(hsdev->sata_dwc_regs->dmacr), - SATA_DWC_DMACR_TXRXCH_CLEAR); + __func__, tag, hsdevp->dma_pending[tag], dmacr); + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); } } @@ -716,7 +764,7 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) __func__, qc->tag, qc->tf.command, get_dma_dir_descript(qc->dma_dir), get_prot_descript(qc->tf.protocol), - in_le32(&(hsdev->sata_dwc_regs->dmacr))); + sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr)); } #endif @@ -725,7 +773,7 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) dev_err(ap->dev, "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n", __func__, - in_le32(&(hsdev->sata_dwc_regs->dmacr))); + sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr)); } hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; @@ -742,8 +790,9 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, u8 status = 0; u32 mask = 0x0; u8 tag = qc->tag; + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); - host_pvt.sata_dwc_sactive_queued = 0; + hsdev->sactive_queued = 0; dev_dbg(ap->dev, "%s checkstatus? 
%x\n", __func__, check_status); if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) @@ -756,10 +805,8 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, /* clear active bit */ mask = (~(qcmd_tag_to_mask(tag))); - host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \ - & mask; - host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \ - & mask; + hsdev->sactive_queued = hsdev->sactive_queued & mask; + hsdev->sactive_issued = hsdev->sactive_issued & mask; ata_qc_complete(qc); return 0; } @@ -767,54 +814,62 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) { /* Enable selective interrupts by setting the interrupt maskregister*/ - out_le32(&hsdev->sata_dwc_regs->intmr, - SATA_DWC_INTMR_ERRM | - SATA_DWC_INTMR_NEWFPM | - SATA_DWC_INTMR_PMABRTM | - SATA_DWC_INTMR_DMATM); + sata_dwc_writel(&hsdev->sata_dwc_regs->intmr, + SATA_DWC_INTMR_ERRM | + SATA_DWC_INTMR_NEWFPM | + SATA_DWC_INTMR_PMABRTM | + SATA_DWC_INTMR_DMATM); /* * Unmask the error bits that should trigger an error interrupt by * setting the error mask register. */ - out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); + sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", - __func__, in_le32(&hsdev->sata_dwc_regs->intmr), - in_le32(&hsdev->sata_dwc_regs->errmr)); + __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr), + sata_dwc_readl(&hsdev->sata_dwc_regs->errmr)); } -static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param) +static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base) { - struct sata_dwc_device_port *hsdevp = param; - struct dw_dma_slave *dws = hsdevp->dws; + port->cmd_addr = base + 0x00; + port->data_addr = base + 0x00; - if (dws->dma_dev != chan->device->dev) - return false; + port->error_addr = base + 0x04; + port->feature_addr = base + 0x04; - chan->private = dws; - return true; -} + port->nsect_addr = base + 0x08; -static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) -{ - port->cmd_addr = (void __iomem *)base + 0x00; - port->data_addr = (void __iomem *)base + 0x00; + port->lbal_addr = base + 0x0c; + port->lbam_addr = base + 0x10; + port->lbah_addr = base + 0x14; - port->error_addr = (void __iomem *)base + 0x04; - port->feature_addr = (void __iomem *)base + 0x04; + port->device_addr = base + 0x18; + port->command_addr = base + 0x1c; + port->status_addr = base + 0x1c; - port->nsect_addr = (void __iomem *)base + 0x08; + port->altstatus_addr = base + 0x20; + port->ctl_addr = base + 0x20; +} + +static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp) +{ + struct sata_dwc_device *hsdev = hsdevp->hsdev; + struct device *dev = hsdev->dev; - port->lbal_addr = (void __iomem *)base + 0x0c; - port->lbam_addr = (void __iomem *)base + 0x10; - port->lbah_addr = (void __iomem *)base + 0x14; +#ifdef CONFIG_SATA_DWC_OLD_DMA + if (!of_find_property(dev->of_node, "dmas", NULL)) + return sata_dwc_dma_get_channel_old(hsdevp); +#endif - port->device_addr = (void __iomem *)base + 0x18; - port->command_addr = (void __iomem *)base + 0x1c; - port->status_addr = (void __iomem *)base + 0x1c; + hsdevp->chan = dma_request_chan(dev, "sata-dma"); + if (IS_ERR(hsdevp->chan)) { + dev_err(dev, "failed to allocate dma channel: %ld\n", + PTR_ERR(hsdevp->chan)); + return PTR_ERR(hsdevp->chan); + } - port->altstatus_addr = 
(void __iomem *)base + 0x20; - port->ctl_addr = (void __iomem *)base + 0x20; + return 0; } /* @@ -829,7 +884,6 @@ static int sata_dwc_port_start(struct ata_port *ap) struct sata_dwc_device *hsdev; struct sata_dwc_device_port *hsdevp = NULL; struct device *pdev; - dma_cap_mask_t mask; int i; hsdev = HSDEV_FROM_AP(ap); @@ -853,20 +907,13 @@ static int sata_dwc_port_start(struct ata_port *ap) } hsdevp->hsdev = hsdev; - hsdevp->dws = &sata_dwc_dma_dws; - hsdevp->dws->dma_dev = hsdev->dev; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); + err = sata_dwc_dma_get_channel(hsdevp); + if (err) + goto CLEANUP_ALLOC; - /* Acquire DMA channel */ - hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp); - if (!hsdevp->chan) { - dev_err(hsdev->dev, "%s: dma channel unavailable\n", - __func__); - err = -EAGAIN; + err = phy_power_on(hsdev->phy); + if (err) goto CLEANUP_ALLOC; - } for (i = 0; i < SATA_DWC_QCMD_MAX; i++) hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; @@ -877,18 +924,18 @@ static int sata_dwc_port_start(struct ata_port *ap) if (ap->port_no == 0) { dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", __func__); - out_le32(&hsdev->sata_dwc_regs->dmacr, - SATA_DWC_DMACR_TXRXCH_CLEAR); + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__); - out_le32(&hsdev->sata_dwc_regs->dbtsr, - (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | - SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); + sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr, + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); } /* Clear any error bits before libata starts issuing commands */ - clear_serror(); + clear_serror(ap); ap->private_data = hsdevp; dev_dbg(ap->dev, "%s: done\n", __func__); return 0; @@ -903,11 +950,13 @@ CLEANUP: static void sata_dwc_port_stop(struct ata_port *ap) { struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); - dmaengine_terminate_all(hsdevp->chan); + dmaengine_terminate_sync(hsdevp->chan); dma_release_channel(hsdevp->chan); + phy_power_off(hsdev->phy); kfree(hsdevp); ap->private_data = NULL; @@ -924,22 +973,20 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap, struct ata_taskfile *tf, u8 tag, u32 cmd_issued) { - unsigned long flags; struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, ata_get_cmd_descript(tf->command), tag); - spin_lock_irqsave(&ap->host->lock, flags); hsdevp->cmd_issued[tag] = cmd_issued; - spin_unlock_irqrestore(&ap->host->lock, flags); + /* * Clear SError before executing a new command. * sata_dwc_scr_write and read can not be used here. Clearing the PM * managed SError register for the disk needs to be done before the * task file is loaded. 
*/ - clear_serror(); + clear_serror(ap); ata_sff_exec_command(ap, tf); } @@ -992,18 +1039,18 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) sata_dwc_tf_dump(ap, &qc->tf); if (start_dma) { - reg = core_scr_read(SCR_ERROR); + sata_dwc_scr_read(&ap->link, SCR_ERROR, ®); if (reg & SATA_DWC_SERROR_ERR_BITS) { dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", __func__, reg); } if (dir == DMA_TO_DEVICE) - out_le32(&hsdev->sata_dwc_regs->dmacr, - SATA_DWC_DMACR_TXCHEN); + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXCHEN); else - out_le32(&hsdev->sata_dwc_regs->dmacr, - SATA_DWC_DMACR_RXCHEN); + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_RXCHEN); /* Enable AHB DMA transfer on the specified channel */ dmaengine_submit(desc); @@ -1025,36 +1072,12 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) sata_dwc_bmdma_start_by_tag(qc, tag); } -/* - * Function : sata_dwc_qc_prep_by_tag - * arguments : ata_queued_cmd *qc, u8 tag - * Return value : None - * qc_prep for a particular queued command based on tag - */ -static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) -{ - struct dma_async_tx_descriptor *desc; - struct ata_port *ap = qc->ap; - struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); - - dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n", - __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir), - qc->n_elem); - - desc = dma_dwc_xfer_setup(qc); - if (!desc) { - dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n", - __func__); - return; - } - hsdevp->desc[tag] = desc; -} - static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) { u32 sactive; u8 tag = qc->tag; struct ata_port *ap = qc->ap; + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); #ifdef DEBUG_NCQ if (qc->tag > 0 || ap->link.sactive > 1) @@ -1068,47 +1091,33 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) if (!ata_is_ncq(qc->tf.protocol)) tag = 0; - sata_dwc_qc_prep_by_tag(qc, tag); + + if (ata_is_dma(qc->tf.protocol)) { + hsdevp->desc[tag] = dma_dwc_xfer_setup(qc); + if (!hsdevp->desc[tag]) + return AC_ERR_SYSTEM; + } else { + hsdevp->desc[tag] = NULL; + } if (ata_is_ncq(qc->tf.protocol)) { - sactive = core_scr_read(SCR_ACTIVE); + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); sactive |= (0x00000001 << tag); - core_scr_write(SCR_ACTIVE, sactive); + sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive); dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive, sactive); ap->ops->sff_tf_load(ap, &qc->tf); - sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag, + sata_dwc_exec_command_by_tag(ap, &qc->tf, tag, SATA_DWC_CMD_ISSUED_PEND); } else { - ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } return 0; } -/* - * Function : sata_dwc_qc_prep - * arguments : ata_queued_cmd *qc - * Return value : None - * qc_prep for a particular queued command - */ - -static void sata_dwc_qc_prep(struct ata_queued_cmd *qc) -{ - if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO)) - return; - -#ifdef DEBUG_NCQ - if (qc->tag > 0) - dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n", - __func__, qc->tag, qc->ap->link.active_tag); - - return ; -#endif -} - static void sata_dwc_error_handler(struct ata_port *ap) { ata_sff_error_handler(ap); @@ -1125,17 +1134,22 @@ static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class, sata_dwc_enable_interrupts(hsdev); /* Reconfigure the DMA control register */ - 
out_le32(&hsdev->sata_dwc_regs->dmacr, - SATA_DWC_DMACR_TXRXCH_CLEAR); + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, + SATA_DWC_DMACR_TXRXCH_CLEAR); /* Reconfigure the DMA Burst Transaction Size register */ - out_le32(&hsdev->sata_dwc_regs->dbtsr, - SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | - SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)); + sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr, + SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)); return ret; } +static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device) +{ + /* SATA DWC is master only */ +} + /* * scsi mid-layer and libata interface structures */ @@ -1148,7 +1162,13 @@ static struct scsi_host_template sata_dwc_sht = { */ .sg_tablesize = LIBATA_MAX_PRD, /* .can_queue = ATA_MAX_QUEUE, */ - .dma_boundary = ATA_DMA_BOUNDARY, + /* + * Make sure a LLI block is not created that will span 8K max FIS + * boundary. If the block spans such a FIS boundary, there is a chance + * that a DMA burst will cross that boundary -- this results in an + * error in the host controller. + */ + .dma_boundary = 0x1fff /* ATA_DMA_BOUNDARY */, }; static struct ata_port_operations sata_dwc_ops = { @@ -1157,7 +1177,6 @@ static struct ata_port_operations sata_dwc_ops = { .error_handler = sata_dwc_error_handler, .hardreset = sata_dwc_hardreset, - .qc_prep = sata_dwc_qc_prep, .qc_issue = sata_dwc_qc_issue, .scr_read = sata_dwc_scr_read, @@ -1166,6 +1185,8 @@ static struct ata_port_operations sata_dwc_ops = { .port_start = sata_dwc_port_start, .port_stop = sata_dwc_port_stop, + .sff_dev_select = sata_dwc_dev_select, + .bmdma_setup = sata_dwc_bmdma_setup, .bmdma_start = sata_dwc_bmdma_start, }; @@ -1184,13 +1205,14 @@ static int sata_dwc_probe(struct platform_device *ofdev) struct sata_dwc_device *hsdev; u32 idr, versionr; char *ver = (char *)&versionr; - u8 __iomem *base; + void __iomem *base; int err = 0; int irq; struct ata_host *host; struct ata_port_info pi = sata_dwc_port_info[0]; const struct ata_port_info *ppi[] = { &pi, NULL }; struct device_node *np = ofdev->dev.of_node; + struct resource *res; /* Allocate DWC SATA device */ host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); @@ -1201,57 +1223,33 @@ static int sata_dwc_probe(struct platform_device *ofdev) host->private_data = hsdev; /* Ioremap SATA registers */ - base = of_iomap(np, 0); - if (!base) { + res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&ofdev->dev, res); + if (IS_ERR(base)) { dev_err(&ofdev->dev, "ioremap failed for SATA register address\n"); - return -ENODEV; + return PTR_ERR(base); } - hsdev->reg_base = base; dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); /* Synopsys DWC SATA specific Registers */ - hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET); + hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET; + hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr); /* Setup port */ host->ports[0]->ioaddr.cmd_addr = base; host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; - host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET; - sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base); + sata_dwc_setup_port(&host->ports[0]->ioaddr, base); /* Read the ID and Version Registers */ - idr = in_le32(&hsdev->sata_dwc_regs->idr); - versionr = in_le32(&hsdev->sata_dwc_regs->versionr); + idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr); + versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr); dev_notice(&ofdev->dev, "id %d, 
controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]); - /* Get SATA DMA interrupt number */ - hsdev->dma->irq = irq_of_parse_and_map(np, 1); - if (hsdev->dma->irq == NO_IRQ) { - dev_err(&ofdev->dev, "no SATA DMA irq\n"); - err = -ENODEV; - goto error_iomap; - } - - /* Get physical SATA DMA register base address */ - hsdev->dma->regs = of_iomap(np, 1); - if (!hsdev->dma->regs) { - dev_err(&ofdev->dev, - "ioremap failed for AHBDMA register address\n"); - err = -ENODEV; - goto error_iomap; - } - /* Save dev for later use in dev_xxx() routines */ hsdev->dev = &ofdev->dev; - hsdev->dma->dev = &ofdev->dev; - - /* Initialize AHB DMAC */ - err = dw_dma_probe(hsdev->dma, NULL); - if (err) - goto error_dma_iomap; - /* Enable SATA Interrupts */ sata_dwc_enable_interrupts(hsdev); @@ -1263,6 +1261,25 @@ static int sata_dwc_probe(struct platform_device *ofdev) goto error_out; } +#ifdef CONFIG_SATA_DWC_OLD_DMA + if (!of_find_property(np, "dmas", NULL)) { + err = sata_dwc_dma_init_old(ofdev, hsdev); + if (err) + goto error_out; + } +#endif + + hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy"); + if (IS_ERR(hsdev->phy)) { + err = PTR_ERR(hsdev->phy); + hsdev->phy = NULL; + goto error_out; + } + + err = phy_init(hsdev->phy); + if (err) + goto error_out; + /* * Now, register with libATA core, this will also initiate the * device discovery process, invoking our port_start() handler & @@ -1276,12 +1293,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) return 0; error_out: - /* Free SATA DMA resources */ - dw_dma_remove(hsdev->dma); -error_dma_iomap: - iounmap(hsdev->dma->regs); -error_iomap: - iounmap(base); + phy_exit(hsdev->phy); return err; } @@ -1293,11 +1305,13 @@ static int sata_dwc_remove(struct platform_device *ofdev) ata_host_detach(host); + phy_exit(hsdev->phy); + +#ifdef CONFIG_SATA_DWC_OLD_DMA /* Free SATA DMA resources */ - dw_dma_remove(hsdev->dma); + sata_dwc_dma_exit_old(hsdev); +#endif - iounmap(hsdev->dma->regs); - iounmap(hsdev->reg_base); dev_dbg(&ofdev->dev, "done\n"); return 0; } diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 8638d575b..aafb8cc03 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -197,7 +197,7 @@ static void highbank_set_em_messages(struct device *dev, for (i = 0; i < SGPIO_PINS; i++) { err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i); - if (IS_ERR_VALUE(err)) + if (err < 0) return; pdata->sgpio_gpio[i] = err; diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index bd74ee555..745489a1c 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -986,7 +986,7 @@ static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) * Looks like a lot of fuss, but it avoids an unnecessary * +1 usec read-after-write delay for unaffected registers. */ - laddr = (long)addr & 0xffff; + laddr = (unsigned long)addr & 0xffff; if (laddr >= 0x300 && laddr <= 0x33c) { laddr &= 0x000f; if (laddr == 0x4 || laddr == 0xc) { |
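To round off the sata_dwc rework above: probe now acquires an optional generic PHY with devm_phy_optional_get() and phy_init(), port_start()/port_stop() bracket it with phy_power_on()/phy_power_off(), and remove() calls phy_exit(). Below is a condensed sketch of that lifecycle under the standard PHY consumer API; demo_phy_attach() and phy_out are hypothetical names, while "sata-phy" is the consumer name used by the driver. Note the driver defers phy_power_on() to port_start(), whereas this sketch combines the steps for brevity.

#include <linux/device.h>
#include <linux/phy/phy.h>

static int demo_phy_attach(struct device *dev, struct phy **phy_out)
{
	struct phy *phy;
	int ret;

	/* devm_phy_optional_get() returns NULL (not an error) when no
	 * "sata-phy" is described for this device; the phy_*() helpers
	 * below are no-ops for a NULL phy, so boards without a PHY node
	 * keep working, as in the probe path above. */
	phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* paired with phy_exit() on teardown */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* paired with phy_power_off() */
	if (ret) {
		phy_exit(phy);
		return ret;
	}

	*phy_out = phy;
	return 0;
}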