From e5fd91f1ef340da553f7a79da9540c3db711c937 Mon Sep 17 00:00:00 2001 From: André Fabian Silva Delgado Date: Tue, 8 Sep 2015 01:01:14 -0300 Subject: Linux-libre 4.2-gnu --- drivers/scsi/hpsa.c | 2794 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 1980 insertions(+), 814 deletions(-) (limited to 'drivers/scsi/hpsa.c') diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8eab107b5..1dafeb433 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -43,6 +43,8 @@ #include #include #include +#include +#include #include #include #include @@ -56,7 +58,7 @@ #include "hpsa.h" /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ -#define HPSA_DRIVER_VERSION "3.4.4-1" +#define HPSA_DRIVER_VERSION "3.4.10-0" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" @@ -129,6 +131,7 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, @@ -186,6 +189,7 @@ static struct board_type products[] = { {0x21CC103C, "Smart Array", &SA5_access}, {0x21CD103C, "Smart Array", &SA5_access}, {0x21CE103C, "Smart HBA", &SA5_access}, + {0x05809005, "SmartHBA-SA", &SA5_access}, {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, @@ -194,6 +198,10 @@ static struct board_type products[] = { {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; +#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy) +static const struct scsi_cmnd hpsa_cmd_busy; +#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle) +static const struct scsi_cmnd hpsa_cmd_idle; static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); @@ -207,6 +215,9 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, static void cmd_free(struct ctlr_info *h, struct CommandList *c); static struct CommandList *cmd_alloc(struct ctlr_info *h); +static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c); +static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, + struct scsi_cmnd *scmd); static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type); @@ -222,6 +233,7 @@ static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); static int hpsa_slave_alloc(struct scsi_device *sdev); +static int hpsa_slave_configure(struct scsi_device *sdev); static void hpsa_slave_destroy(struct scsi_device *sdev); static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); @@ -232,7 +244,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, int min_blocks, u32 *bucket_map); -static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); +static void hpsa_free_performant_mode(struct ctlr_info *h); +static int hpsa_put_ctlr_into_performant_mode(struct 
ctlr_info *h); static inline u32 next_command(struct ctlr_info *h, u8 q); static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, @@ -252,6 +265,8 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); static void hpsa_command_resubmit_worker(struct work_struct *work); +static u32 lockup_detected(struct ctlr_info *h); +static int detect_controller_lockup(struct ctlr_info *h); static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) { @@ -265,40 +280,86 @@ static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) return (struct ctlr_info *) *priv; } +static inline bool hpsa_is_cmd_idle(struct CommandList *c) +{ + return c->scsi_cmd == SCSI_CMD_IDLE; +} + +static inline bool hpsa_is_pending_event(struct CommandList *c) +{ + return c->abort_pending || c->reset_pending; +} + +/* extract sense key, asc, and ascq from sense data. -1 means invalid. */ +static void decode_sense_data(const u8 *sense_data, int sense_data_len, + u8 *sense_key, u8 *asc, u8 *ascq) +{ + struct scsi_sense_hdr sshdr; + bool rc; + + *sense_key = -1; + *asc = -1; + *ascq = -1; + + if (sense_data_len < 1) + return; + + rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr); + if (rc) { + *sense_key = sshdr.sense_key; + *asc = sshdr.asc; + *ascq = sshdr.ascq; + } +} + static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c) { - if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) + u8 sense_key, asc, ascq; + int sense_len; + + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) + sense_len = sizeof(c->err_info->SenseInfo); + else + sense_len = c->err_info->SenseLen; + + decode_sense_data(c->err_info->SenseInfo, sense_len, + &sense_key, &asc, &ascq); + if (sense_key != UNIT_ATTENTION || asc == -1) return 0; - switch (c->err_info->SenseInfo[12]) { + switch (asc) { case STATE_CHANGED: - dev_warn(&h->pdev->dev, HPSA "%d: a state change " - "detected, command retried\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: a state change detected, command retried\n", + h->devname); break; case LUN_FAILED: dev_warn(&h->pdev->dev, - HPSA "%d: LUN failure detected\n", h->ctlr); + "%s: LUN failure detected\n", h->devname); break; case REPORT_LUNS_CHANGED: dev_warn(&h->pdev->dev, - HPSA "%d: report LUN data changed\n", h->ctlr); + "%s: report LUN data changed\n", h->devname); /* * Note: this REPORT_LUNS_CHANGED condition only occurs on the external * target (array) devices. 
*/ break; case POWER_OR_RESET: - dev_warn(&h->pdev->dev, HPSA "%d: a power on " - "or device reset detected\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: a power on or device reset detected\n", + h->devname); break; case UNIT_ATTENTION_CLEARED: - dev_warn(&h->pdev->dev, HPSA "%d: unit attention " - "cleared by another initiator\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: unit attention cleared by another initiator\n", + h->devname); break; default: - dev_warn(&h->pdev->dev, HPSA "%d: unknown " - "unit attention detected\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: unknown unit attention detected\n", + h->devname); break; } return 1; @@ -314,6 +375,20 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c) return 1; } +static u32 lockup_detected(struct ctlr_info *h); +static ssize_t host_show_lockup_detected(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ld; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + ld = lockup_detected(h); + + return sprintf(buf, "ld=%d\n", ld); +} + static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -425,7 +500,7 @@ static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, /* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ - 0x324b103C, /* SmartArray P711m */ + 0x324b103C, /* Smart Array P711m */ 0x3223103C, /* Smart Array P800 */ 0x3234103C, /* Smart Array P400 */ 0x3235103C, /* Smart Array P400i */ @@ -467,24 +542,32 @@ static u32 soft_unresettable_controller[] = { 0x409D0E11, /* Smart Array 6400 EM */ }; -static int ctlr_is_hard_resettable(u32 board_id) +static u32 needs_abort_tags_swizzled[] = { + 0x323D103C, /* Smart Array P700m */ + 0x324a103C, /* Smart Array P712m */ + 0x324b103C, /* SmartArray P711m */ +}; + +static int board_id_in_array(u32 a[], int nelems, u32 board_id) { int i; - for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) - if (unresettable_controller[i] == board_id) - return 0; - return 1; + for (i = 0; i < nelems; i++) + if (a[i] == board_id) + return 1; + return 0; } -static int ctlr_is_soft_resettable(u32 board_id) +static int ctlr_is_hard_resettable(u32 board_id) { - int i; + return !board_id_in_array(unresettable_controller, + ARRAY_SIZE(unresettable_controller), board_id); +} - for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) - if (soft_unresettable_controller[i] == board_id) - return 0; - return 1; +static int ctlr_is_soft_resettable(u32 board_id) +{ + return !board_id_in_array(soft_unresettable_controller, + ARRAY_SIZE(soft_unresettable_controller), board_id); } static int ctlr_is_resettable(u32 board_id) @@ -493,6 +576,12 @@ static int ctlr_is_resettable(u32 board_id) ctlr_is_soft_resettable(board_id); } +static int ctlr_needs_abort_tags_swizzled(u32 board_id) +{ + return board_id_in_array(needs_abort_tags_swizzled, + ARRAY_SIZE(needs_abort_tags_swizzled), board_id); +} + static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { @@ -647,12 +736,15 @@ static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL); static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); +static DEVICE_ATTR(lockup_detected, S_IRUGO, + host_show_lockup_detected, NULL); static struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level, &dev_attr_lunid, 
&dev_attr_unique_id, &dev_attr_hp_ssd_smart_path_enabled, + &dev_attr_lockup_detected, NULL, }; @@ -667,6 +759,9 @@ static struct device_attribute *hpsa_shost_attrs[] = { NULL, }; +#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \ + HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS) + static struct scsi_host_template hpsa_driver_template = { .module = THIS_MODULE, .name = HPSA, @@ -681,6 +776,7 @@ static struct scsi_host_template hpsa_driver_template = { .eh_device_reset_handler = hpsa_eh_device_reset_handler, .ioctl = hpsa_ioctl, .slave_alloc = hpsa_slave_alloc, + .slave_configure = hpsa_slave_configure, .slave_destroy = hpsa_slave_destroy, #ifdef CONFIG_COMPAT .compat_ioctl = hpsa_compat_ioctl, @@ -743,30 +839,43 @@ static inline u32 next_command(struct ctlr_info *h, u8 q) * a separate special register for submitting commands. */ -/* set_performant_mode: Modify the tag for cciss performant +/* + * set_performant_mode: Modify the tag for cciss performant * set bit 0 for pull model, bits 3-1 for block fetch * register number */ -static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) +#define DEFAULT_REPLY_QUEUE (-1) +static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, + int reply_queue) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) { c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); - if (likely(h->msix_vector > 0)) + if (unlikely(!h->msix_vector)) + return; + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) c->Header.ReplyQueue = raw_smp_processor_id() % h->nreply_queues; + else + c->Header.ReplyQueue = reply_queue % h->nreply_queues; } } static void set_ioaccel1_performant_mode(struct ctlr_info *h, - struct CommandList *c) + struct CommandList *c, + int reply_queue) { struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; - /* Tell the controller to post the reply to the queue for this + /* + * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - cp->ReplyQueue = smp_processor_id() % h->nreply_queues; - /* Set the bits in the address sent down to include: + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->ReplyQueue = smp_processor_id() % h->nreply_queues; + else + cp->ReplyQueue = reply_queue % h->nreply_queues; + /* + * Set the bits in the address sent down to include: * - performant mode bit (bit 0) * - pull count (bits 1-3) * - command type (bits 4-6) @@ -775,20 +884,48 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h, IOACCEL1_BUSADDR_CMDTYPE; } -static void set_ioaccel2_performant_mode(struct ctlr_info *h, - struct CommandList *c) +static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) { - struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *) + &h->ioaccel2_cmd_pool[c->cmdindex]; /* Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. 
*/ - cp->reply_queue = smp_processor_id() % h->nreply_queues; + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->reply_queue = smp_processor_id() % h->nreply_queues; + else + cp->reply_queue = reply_queue % h->nreply_queues; /* Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 * - pull count (bits 0-3) * - command type isn't needed for ioaccel2 */ + c->busaddr |= h->ioaccel2_blockFetchTable[0]; +} + +static void set_ioaccel2_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) +{ + struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* + * Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. + */ + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->reply_queue = smp_processor_id() % h->nreply_queues; + else + cp->reply_queue = reply_queue % h->nreply_queues; + /* + * Set the bits in the address sent down to include: + * - performant mode bit not used in ioaccel mode 2 + * - pull count (bits 0-3) + * - command type isn't needed for ioaccel2 + */ c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); } @@ -821,26 +958,38 @@ static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; } -static void enqueue_cmd_and_start_io(struct ctlr_info *h, - struct CommandList *c) +static void __enqueue_cmd_and_start_io(struct ctlr_info *h, + struct CommandList *c, int reply_queue) { dial_down_lockup_detection_during_fw_flash(h, c); atomic_inc(&h->commands_outstanding); switch (c->cmd_type) { case CMD_IOACCEL1: - set_ioaccel1_performant_mode(h, c); + set_ioaccel1_performant_mode(h, c, reply_queue); writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); break; case CMD_IOACCEL2: - set_ioaccel2_performant_mode(h, c); + set_ioaccel2_performant_mode(h, c, reply_queue); + writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); + break; + case IOACCEL2_TMF: + set_ioaccel2_tmf_performant_mode(h, c, reply_queue); writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); break; default: - set_performant_mode(h, c); + set_performant_mode(h, c, reply_queue); h->access.submit_command(h, c); } } +static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) +{ + if (unlikely(hpsa_is_pending_event(c))) + return finish_cmd(c); + + __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); +} + static inline int is_hba_lunid(unsigned char scsi3addr[]) { return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; @@ -881,6 +1030,23 @@ static int hpsa_find_target_lun(struct ctlr_info *h, return !found; } +static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev, char *description) +{ + dev_printk(level, &h->pdev->dev, + "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n", + h->scsi_host->host_no, dev->bus, dev->target, dev->lun, + description, + scsi_device_type(dev->devtype), + dev->vendor, + dev->model, + dev->raid_level > RAID_UNKNOWN ? + "RAID-?" : raid_label[dev->raid_level], + dev->offload_config ? '+' : '-', + dev->offload_enabled ? '+' : '-', + dev->expose_state); +} + /* Add an entry into h->dev[] array. 
*/ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, struct hpsa_scsi_dev_t *device, @@ -948,15 +1114,10 @@ lun_assigned: h->ndevices++; added[*nadded] = device; (*nadded)++; - - /* initially, (before registering with scsi layer) we don't - * know our hostno and we don't want to print anything first - * time anyway (the scsi layer's inquiries will show that info) - */ - /* if (hostno != -1) */ - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", - scsi_device_type(device->devtype), hostno, - device->bus, device->target, device->lun); + hpsa_show_dev_msg(KERN_INFO, h, device, + device->expose_state & HPSA_SCSI_ADD ? "added" : "masked"); + device->offload_to_be_enabled = device->offload_enabled; + device->offload_enabled = 0; return 0; } @@ -964,6 +1125,7 @@ lun_assigned: static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, int entry, struct hpsa_scsi_dev_t *new_entry) { + int offload_enabled; /* assumes h->devlock is held */ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); @@ -982,16 +1144,29 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, */ h->dev[entry]->raid_map = new_entry->raid_map; h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; - wmb(); /* ensure raid map updated prior to ->offload_enabled */ } + if (new_entry->hba_ioaccel_enabled) { + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ + } + h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; h->dev[entry]->offload_config = new_entry->offload_config; h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; - h->dev[entry]->offload_enabled = new_entry->offload_enabled; h->dev[entry]->queue_depth = new_entry->queue_depth; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", - scsi_device_type(new_entry->devtype), hostno, new_entry->bus, - new_entry->target, new_entry->lun); + /* + * We can turn off ioaccel offload now, but need to delay turning + * it on until we can update h->dev[entry]->phys_disk[], but we + * can't do that until all the devices are updated. + */ + h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled; + if (!new_entry->offload_enabled) + h->dev[entry]->offload_enabled = 0; + + offload_enabled = h->dev[entry]->offload_enabled; + h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled; + hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); + h->dev[entry]->offload_enabled = offload_enabled; } /* Replace an entry from h->dev[] array. */ @@ -1017,9 +1192,9 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, h->dev[entry] = new_entry; added[*nadded] = new_entry; (*nadded)++; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", - scsi_device_type(new_entry->devtype), hostno, new_entry->bus, - new_entry->target, new_entry->lun); + hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); + new_entry->offload_to_be_enabled = new_entry->offload_enabled; + new_entry->offload_enabled = 0; } /* Remove an entry from h->dev[] array. 
*/ @@ -1039,9 +1214,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, for (i = entry; i < h->ndevices-1; i++) h->dev[i] = h->dev[i+1]; h->ndevices--; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", - scsi_device_type(sd->devtype), hostno, sd->bus, sd->target, - sd->lun); + hpsa_show_dev_msg(KERN_INFO, h, sd, "removed"); } #define SCSI3ADDR_EQ(a, b) ( \ @@ -1283,6 +1456,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) nraid_map_entries = RAID_MAP_MAX_ENTRIES; + logical_drive->nphysical_disks = nraid_map_entries; + qdepth = 0; for (i = 0; i < nraid_map_entries; i++) { logical_drive->phys_disk[i] = NULL; @@ -1312,7 +1487,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, */ if (!logical_drive->phys_disk[i]) { logical_drive->offload_enabled = 0; - logical_drive->queue_depth = h->nr_cmds; + logical_drive->offload_to_be_enabled = 0; + logical_drive->queue_depth = 8; } } if (nraid_map_entries) @@ -1335,6 +1511,16 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, continue; if (!is_logical_dev_addr_mode(dev[i]->scsi3addr)) continue; + + /* + * If offload is currently enabled, the RAID map and + * phys_disk[] assignment *better* not be changing + * and since it isn't changing, we do not need to + * update it. + */ + if (dev[i]->offload_enabled) + continue; + hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); } } @@ -1411,9 +1597,7 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, */ if (sd[i]->volume_offline) { hpsa_show_volume_status(h, sd[i]); - dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n", - h->scsi_host->host_no, - sd[i]->bus, sd[i]->target, sd[i]->lun); + hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); continue; } @@ -1433,6 +1617,14 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, /* but if it does happen, we just ignore that device */ } } + hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); + + /* Now that h->dev[]->phys_disk[] is coherent, we can enable + * any logical drives that need it enabled. + */ + for (i = 0; i < h->ndevices; i++) + h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; + spin_unlock_irqrestore(&h->devlock, flags); /* Monitor devices which are in one of several NOT READY states to be @@ -1456,20 +1648,22 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, sh = h->scsi_host; /* Notify scsi mid layer of any removed devices */ for (i = 0; i < nremoved; i++) { - struct scsi_device *sdev = - scsi_device_lookup(sh, removed[i]->bus, - removed[i]->target, removed[i]->lun); - if (sdev != NULL) { - scsi_remove_device(sdev); - scsi_device_put(sdev); - } else { - /* We don't expect to get here. - * future cmds to this device will get selection - * timeout as if the device was gone. - */ - dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " - " for removal.", hostno, removed[i]->bus, - removed[i]->target, removed[i]->lun); + if (removed[i]->expose_state & HPSA_SCSI_ADD) { + struct scsi_device *sdev = + scsi_device_lookup(sh, removed[i]->bus, + removed[i]->target, removed[i]->lun); + if (sdev != NULL) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + /* + * We don't expect to get here. + * future cmds to this device will get selection + * timeout as if the device was gone. 
+ */ + hpsa_show_dev_msg(KERN_WARNING, h, removed[i], + "didn't find device for removal."); + } } kfree(removed[i]); removed[i] = NULL; @@ -1477,16 +1671,18 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, /* Notify scsi mid layer of any added devices */ for (i = 0; i < nadded; i++) { + if (!(added[i]->expose_state & HPSA_SCSI_ADD)) + continue; if (scsi_add_device(sh, added[i]->bus, added[i]->target, added[i]->lun) == 0) continue; - dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " - "device not added.\n", hostno, added[i]->bus, - added[i]->target, added[i]->lun); + hpsa_show_dev_msg(KERN_WARNING, h, added[i], + "addition failed, device not added."); /* now we have to remove it from h->dev, * since it didn't get added to scsi mid layer */ fixup_botched_add(h, added[i]); + added[i] = NULL; } free_and_out: @@ -1512,7 +1708,6 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, return NULL; } -/* link sdev->hostdata to our per-device structure. */ static int hpsa_slave_alloc(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *sd; @@ -1523,21 +1718,80 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) spin_lock_irqsave(&h->devlock, flags); sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), sdev_id(sdev), sdev->lun); - if (sd != NULL) { - sdev->hostdata = sd; - if (sd->queue_depth) - scsi_change_queue_depth(sdev, sd->queue_depth); + if (likely(sd)) { atomic_set(&sd->ioaccel_cmds_out, 0); - } + sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL; + } else + sdev->hostdata = NULL; spin_unlock_irqrestore(&h->devlock, flags); return 0; } +/* configure scsi device based on internal per-device structure */ +static int hpsa_slave_configure(struct scsi_device *sdev) +{ + struct hpsa_scsi_dev_t *sd; + int queue_depth; + + sd = sdev->hostdata; + sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH); + + if (sd) + queue_depth = sd->queue_depth != 0 ? + sd->queue_depth : sdev->host->can_queue; + else + queue_depth = sdev->host->can_queue; + + scsi_change_queue_depth(sdev, queue_depth); + + return 0; +} + static void hpsa_slave_destroy(struct scsi_device *sdev) { /* nothing to do. 
*/ } +static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (!h->ioaccel2_cmd_sg_list) + return; + for (i = 0; i < h->nr_cmds; i++) { + kfree(h->ioaccel2_cmd_sg_list[i]); + h->ioaccel2_cmd_sg_list[i] = NULL; + } + kfree(h->ioaccel2_cmd_sg_list); + h->ioaccel2_cmd_sg_list = NULL; +} + +static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (h->chainsize <= 0) + return 0; + + h->ioaccel2_cmd_sg_list = + kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds, + GFP_KERNEL); + if (!h->ioaccel2_cmd_sg_list) + return -ENOMEM; + for (i = 0; i < h->nr_cmds; i++) { + h->ioaccel2_cmd_sg_list[i] = + kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) * + h->maxsgentries, GFP_KERNEL); + if (!h->ioaccel2_cmd_sg_list[i]) + goto clean; + } + return 0; + +clean: + hpsa_free_ioaccel2_sg_chain_blocks(h); + return -ENOMEM; +} + static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) { int i; @@ -1552,7 +1806,7 @@ static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) h->cmd_sg_list = NULL; } -static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) +static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) { int i; @@ -1580,6 +1834,39 @@ clean: return -ENOMEM; } +static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, + struct io_accel2_cmd *cp, struct CommandList *c) +{ + struct ioaccel2_sg_element *chain_block; + u64 temp64; + u32 chain_size; + + chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; + chain_size = le32_to_cpu(cp->data_len); + temp64 = pci_map_single(h->pdev, chain_block, chain_size, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&h->pdev->dev, temp64)) { + /* prevent subsequent unmapping */ + cp->sg->address = 0; + return -1; + } + cp->sg->address = cpu_to_le64(temp64); + return 0; +} + +static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, + struct io_accel2_cmd *cp) +{ + struct ioaccel2_sg_element *chain_sg; + u64 temp64; + u32 chain_size; + + chain_sg = cp->sg; + temp64 = le64_to_cpu(chain_sg->address); + chain_size = le32_to_cpu(cp->data_len); + pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); +} + static int hpsa_map_sg_chain_block(struct ctlr_info *h, struct CommandList *c) { @@ -1629,6 +1916,7 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, { int data_len; int retry = 0; + u32 ioaccel2_resid = 0; switch (c2->error_data.serv_response) { case IOACCEL2_SERV_RESPONSE_COMPLETE: @@ -1636,9 +1924,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: break; case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: - dev_warn(&h->pdev->dev, - "%s: task complete with check condition.\n", - "HP SSD Smart Path"); cmd->result |= SAM_STAT_CHECK_CONDITION; if (c2->error_data.data_present != IOACCEL2_SENSE_DATA_PRESENT) { @@ -1658,58 +1943,56 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: - dev_warn(&h->pdev->dev, - "%s: task complete with BUSY status.\n", - "HP SSD Smart Path"); retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: - dev_warn(&h->pdev->dev, - "%s: task complete with reservation conflict.\n", - "HP SSD Smart Path"); retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: - /* Make scsi midlayer do unlimited retries */ - cmd->result = DID_IMM_RETRY << 16; + retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: - dev_warn(&h->pdev->dev, - "%s: task complete with aborted status.\n", - "HP SSD Smart Path"); retry = 1; break; default: - 
dev_warn(&h->pdev->dev, - "%s: task complete with unrecognized status: 0x%02x\n", - "HP SSD Smart Path", c2->error_data.status); retry = 1; break; } break; case IOACCEL2_SERV_RESPONSE_FAILURE: - /* don't expect to get here. */ - dev_warn(&h->pdev->dev, - "unexpected delivery or target failure, status = 0x%02x\n", - c2->error_data.status); - retry = 1; + switch (c2->error_data.status) { + case IOACCEL2_STATUS_SR_IO_ERROR: + case IOACCEL2_STATUS_SR_IO_ABORTED: + case IOACCEL2_STATUS_SR_OVERRUN: + retry = 1; + break; + case IOACCEL2_STATUS_SR_UNDERRUN: + cmd->result = (DID_OK << 16); /* host byte */ + cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ + ioaccel2_resid = get_unaligned_le32( + &c2->error_data.resid_cnt[0]); + scsi_set_resid(cmd, ioaccel2_resid); + break; + case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: + case IOACCEL2_STATUS_SR_INVALID_DEVICE: + case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: + /* We will get an event from ctlr to trigger rescan */ + retry = 1; + break; + default: + retry = 1; + } break; case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: break; case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: break; case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: - dev_warn(&h->pdev->dev, "task management function rejected.\n"); retry = 1; break; case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: - dev_warn(&h->pdev->dev, "task management function invalid LUN\n"); break; default: - dev_warn(&h->pdev->dev, - "%s: Unrecognized server response: 0x%02x\n", - "HP SSD Smart Path", - c2->error_data.serv_response); retry = 1; break; } @@ -1717,6 +2000,87 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, return retry; /* retry on raid path? */ } +static void hpsa_cmd_resolve_events(struct ctlr_info *h, + struct CommandList *c) +{ + bool do_wake = false; + + /* + * Prevent the following race in the abort handler: + * + * 1. LLD is requested to abort a SCSI command + * 2. The SCSI command completes + * 3. The struct CommandList associated with step 2 is made available + * 4. New I/O request to LLD to another LUN re-uses struct CommandList + * 5. Abort handler follows scsi_cmnd->host_scribble and + * finds struct CommandList and tries to aborts it + * Now we have aborted the wrong command. + * + * Reset c->scsi_cmd here so that the abort or reset handler will know + * this command has completed. Then, check to see if the handler is + * waiting for this command, and, if so, wake it. + */ + c->scsi_cmd = SCSI_CMD_IDLE; + mb(); /* Declare command idle before checking for pending events. */ + if (c->abort_pending) { + do_wake = true; + c->abort_pending = false; + } + if (c->reset_pending) { + unsigned long flags; + struct hpsa_scsi_dev_t *dev; + + /* + * There appears to be a reset pending; lock the lock and + * reconfirm. If so, then decrement the count of outstanding + * commands and wake the reset command if this is the last one. + */ + spin_lock_irqsave(&h->lock, flags); + dev = c->reset_pending; /* Re-fetch under the lock. 
*/ + if (dev && atomic_dec_and_test(&dev->reset_cmds_out)) + do_wake = true; + c->reset_pending = NULL; + spin_unlock_irqrestore(&h->lock, flags); + } + + if (do_wake) + wake_up_all(&h->event_sync_wait_queue); +} + +static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, + struct CommandList *c) +{ + hpsa_cmd_resolve_events(h, c); + cmd_tagged_free(h, c); +} + +static void hpsa_cmd_free_and_done(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd) +{ + hpsa_cmd_resolve_and_free(h, c); + cmd->scsi_done(cmd); +} + +static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) +{ + INIT_WORK(&c->work, hpsa_command_resubmit_worker); + queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); +} + +static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd) +{ + cmd->result = DID_ABORT << 16; +} + +static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c, + struct scsi_cmnd *cmd) +{ + hpsa_set_scsi_cmd_aborted(cmd); + dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", + c->Request.CDB, c->err_info->ScsiStatus); + hpsa_cmd_resolve_and_free(h, c); +} + static void process_ioaccel2_completion(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, struct hpsa_scsi_dev_t *dev) @@ -1725,13 +2089,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h, /* check for good status */ if (likely(c2->error_data.serv_response == 0 && - c2->error_data.status == 0)) { - cmd_free(h, c); - cmd->scsi_done(cmd); - return; - } + c2->error_data.status == 0)) + return hpsa_cmd_free_and_done(h, c, cmd); - /* Any RAID offload error results in retry which will use + /* + * Any RAID offload error results in retry which will use * the normal I/O path so the controller can handle whatever's * wrong. */ @@ -1741,19 +2103,42 @@ static void process_ioaccel2_completion(struct ctlr_info *h, if (c2->error_data.status == IOACCEL2_STATUS_SR_IOACCEL_DISABLED) dev->offload_enabled = 0; - goto retry_cmd; + + return hpsa_retry_cmd(h, c); } if (handle_ioaccel_mode2_error(h, c, cmd, c2)) - goto retry_cmd; + return hpsa_retry_cmd(h, c); - cmd_free(h, c); - cmd->scsi_done(cmd); - return; + return hpsa_cmd_free_and_done(h, c, cmd); +} -retry_cmd: - INIT_WORK(&c->work, hpsa_command_resubmit_worker); - queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); +/* Returns 0 on success, < 0 otherwise. */ +static int hpsa_evaluate_tmf_status(struct ctlr_info *h, + struct CommandList *cp) +{ + u8 tmf_status = cp->err_info->ScsiStatus; + + switch (tmf_status) { + case CISS_TMF_COMPLETE: + /* + * CISS_TMF_COMPLETE never happens, instead, + * ei->CommandStatus == 0 for this case. 
+ */ + case CISS_TMF_SUCCESS: + return 0; + case CISS_TMF_INVALID_FRAME: + case CISS_TMF_NOT_SUPPORTED: + case CISS_TMF_FAILED: + case CISS_TMF_WRONG_LUN: + case CISS_TMF_OVERLAPPED_TAG: + break; + default: + dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", + tmf_status); + break; + } + return -tmf_status; } static void complete_scsi_command(struct CommandList *cp) @@ -1762,51 +2147,58 @@ static void complete_scsi_command(struct CommandList *cp) struct ctlr_info *h; struct ErrorInfo *ei; struct hpsa_scsi_dev_t *dev; + struct io_accel2_cmd *c2; - unsigned char sense_key; - unsigned char asc; /* additional sense code */ - unsigned char ascq; /* additional sense code qualifier */ + u8 sense_key; + u8 asc; /* additional sense code */ + u8 ascq; /* additional sense code qualifier */ unsigned long sense_data_size; ei = cp->err_info; cmd = cp->scsi_cmd; h = cp->h; dev = cmd->device->hostdata; + c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; scsi_dma_unmap(cmd); /* undo the DMA mappings */ if ((cp->cmd_type == CMD_SCSI) && (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) hpsa_unmap_sg_chain_block(h, cp); + if ((cp->cmd_type == CMD_IOACCEL2) && + (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) + hpsa_unmap_ioaccel2_sg_chain_block(h, c2); + cmd->result = (DID_OK << 16); /* host byte */ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) atomic_dec(&cp->phys_disk->ioaccel_cmds_out); - if (cp->cmd_type == CMD_IOACCEL2) - return process_ioaccel2_completion(h, cp, cmd, dev); - - cmd->result |= ei->ScsiStatus; + /* + * We check for lockup status here as it may be set for + * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by + * fail_all_oustanding_cmds() + */ + if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { + /* DID_NO_CONNECT will prevent a retry */ + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(h, cp, cmd); + } - scsi_set_resid(cmd, ei->ResidualCnt); - if (ei->CommandStatus == 0) { - if (cp->cmd_type == CMD_IOACCEL1) - atomic_dec(&cp->phys_disk->ioaccel_cmds_out); - cmd_free(h, cp); - cmd->scsi_done(cmd); - return; + if ((unlikely(hpsa_is_pending_event(cp)))) { + if (cp->reset_pending) + return hpsa_cmd_resolve_and_free(h, cp); + if (cp->abort_pending) + return hpsa_cmd_abort_and_free(h, cp, cmd); } - /* copy the sense data */ - if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) - sense_data_size = SCSI_SENSE_BUFFERSIZE; - else - sense_data_size = sizeof(ei->SenseInfo); - if (ei->SenseLen < sense_data_size) - sense_data_size = ei->SenseLen; + if (cp->cmd_type == CMD_IOACCEL2) + return process_ioaccel2_completion(h, cp, cmd, dev); - memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); + scsi_set_resid(cmd, ei->ResidualCnt); + if (ei->CommandStatus == 0) + return hpsa_cmd_free_and_done(h, cp, cmd); /* For I/O accelerator commands, copy over some fields to the normal * CISS header used below for error handling. 
@@ -1828,10 +2220,7 @@ static void complete_scsi_command(struct CommandList *cp) if (is_logical_dev_addr_mode(dev->scsi3addr)) { if (ei->CommandStatus == CMD_IOACCEL_DISABLED) dev->offload_enabled = 0; - INIT_WORK(&cp->work, hpsa_command_resubmit_worker); - queue_work_on(raw_smp_processor_id(), - h->resubmit_wq, &cp->work); - return; + return hpsa_retry_cmd(h, cp); } } @@ -1839,14 +2228,18 @@ static void complete_scsi_command(struct CommandList *cp) switch (ei->CommandStatus) { case CMD_TARGET_STATUS: - if (ei->ScsiStatus) { - /* Get sense key */ - sense_key = 0xf & ei->SenseInfo[2]; - /* Get additional sense code */ - asc = ei->SenseInfo[12]; - /* Get addition sense code qualifier */ - ascq = ei->SenseInfo[13]; - } + cmd->result |= ei->ScsiStatus; + /* copy the sense data */ + if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) + sense_data_size = SCSI_SENSE_BUFFERSIZE; + else + sense_data_size = sizeof(ei->SenseInfo); + if (ei->SenseLen < sense_data_size) + sense_data_size = ei->SenseLen; + memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); + if (ei->ScsiStatus) + decode_sense_data(ei->SenseInfo, sense_data_size, + &sense_key, &asc, &ascq); if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { if (sense_key == ABORTED_COMMAND) { cmd->result |= DID_SOFT_ERROR << 16; @@ -1918,10 +2311,8 @@ static void complete_scsi_command(struct CommandList *cp) cp->Request.CDB); break; case CMD_ABORTED: - cmd->result = DID_ABORT << 16; - dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", - cp->Request.CDB, ei->ScsiStatus); - break; + /* Return now to avoid calling scsi_done(). */ + return hpsa_cmd_abort_and_free(h, cp, cmd); case CMD_ABORT_FAILED: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", @@ -1941,6 +2332,10 @@ static void complete_scsi_command(struct CommandList *cp) cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "Command unabortable\n"); break; + case CMD_TMF_STATUS: + if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ + cmd->result = DID_ERROR << 16; + break; case CMD_IOACCEL_DISABLED: /* This only handles the direct pass-through case since RAID * offload is handled above. Just attempt a retry. 
@@ -1954,8 +2349,8 @@ static void complete_scsi_command(struct CommandList *cp) dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); } - cmd_free(h, cp); - cmd->scsi_done(cmd); + + return hpsa_cmd_free_and_done(h, cp, cmd); } static void hpsa_pci_unmap(struct pci_dev *pdev, @@ -1998,14 +2393,36 @@ static int hpsa_map_one(struct pci_dev *pdev, return 0; } -static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, - struct CommandList *c) +#define NO_TIMEOUT ((unsigned long) -1) +#define DEFAULT_TIMEOUT 30000 /* milliseconds */ +static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, + struct CommandList *c, int reply_queue, unsigned long timeout_msecs) { DECLARE_COMPLETION_ONSTACK(wait); c->waiting = &wait; - enqueue_cmd_and_start_io(h, c); - wait_for_completion(&wait); + __enqueue_cmd_and_start_io(h, c, reply_queue); + if (timeout_msecs == NO_TIMEOUT) { + /* TODO: get rid of this no-timeout thing */ + wait_for_completion_io(&wait); + return IO_OK; + } + if (!wait_for_completion_io_timeout(&wait, + msecs_to_jiffies(timeout_msecs))) { + dev_warn(&h->pdev->dev, "Command timed out.\n"); + return -ETIMEDOUT; + } + return IO_OK; +} + +static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, + int reply_queue, unsigned long timeout_msecs) +{ + if (unlikely(lockup_detected(h))) { + c->err_info->CommandStatus = CMD_CTLR_LOCKUP; + return IO_OK; + } + return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); } static u32 lockup_detected(struct ctlr_info *h) @@ -2020,25 +2437,19 @@ static u32 lockup_detected(struct ctlr_info *h) return rc; } -static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, - struct CommandList *c) -{ - /* If controller lockup detected, fake a hardware error. 
*/ - if (unlikely(lockup_detected(h))) - c->err_info->CommandStatus = CMD_HARDWARE_ERR; - else - hpsa_scsi_do_simple_cmd_core(h, c); -} - #define MAX_DRIVER_CMD_RETRIES 25 -static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, - struct CommandList *c, int data_direction) +static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, + struct CommandList *c, int data_direction, unsigned long timeout_msecs) { int backoff_time = 10, retry_count = 0; + int rc; do { memset(c->err_info, 0, sizeof(*c->err_info)); - hpsa_scsi_do_simple_cmd_core(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + timeout_msecs); + if (rc) + break; retry_count++; if (retry_count > 3) { msleep(backoff_time); @@ -2049,6 +2460,9 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, check_for_busy(h, c)) && retry_count <= MAX_DRIVER_CMD_RETRIES); hpsa_pci_unmap(h->pdev, c, 1, data_direction); + if (retry_count > MAX_DRIVER_CMD_RETRIES) + rc = -EIO; + return rc; } static void hpsa_print_cmd(struct ctlr_info *h, char *txt, @@ -2072,16 +2486,23 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h, { const struct ErrorInfo *ei = cp->err_info; struct device *d = &cp->h->pdev->dev; - const u8 *sd = ei->SenseInfo; + u8 sense_key, asc, ascq; + int sense_len; switch (ei->CommandStatus) { case CMD_TARGET_STATUS: + if (ei->SenseLen > sizeof(ei->SenseInfo)) + sense_len = sizeof(ei->SenseInfo); + else + sense_len = ei->SenseLen; + decode_sense_data(ei->SenseInfo, sense_len, + &sense_key, &asc, &ascq); hpsa_print_cmd(h, "SCSI status", cp); if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) - dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", - sd[2] & 0x0f, sd[12], sd[13]); + dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n", + sense_key, asc, ascq); else - dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus); + dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus); if (ei->ScsiStatus == 0) dev_warn(d, "SCSI status is abnormally zero. " "(probably indicates selection timeout " @@ -2125,6 +2546,9 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h, case CMD_UNABORTABLE: hpsa_print_cmd(h, "unabortable", cp); break; + case CMD_CTLR_LOCKUP: + hpsa_print_cmd(h, "controller lockup detected", cp); + break; default: hpsa_print_cmd(h, "unknown status", cp); dev_warn(d, "Unknown command status %x\n", @@ -2142,17 +2566,15 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, c = cmd_alloc(h); - if (c == NULL) { - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } - if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD)) { rc = -1; goto out; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); @@ -2172,17 +2594,15 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, struct ErrorInfo *ei; c = cmd_alloc(h); - if (c == NULL) { /* trouble... 
*/ - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } - if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, page, scsi3addr, TYPE_CMD)) { rc = -1; goto out; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); @@ -2191,10 +2611,10 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, out: cmd_free(h, c); return rc; - } +} static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, - u8 reset_type) + u8 reset_type, int reply_queue) { int rc = IO_OK; struct CommandList *c; @@ -2202,16 +2622,16 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } /* fill_cmd can't fail here, no data buffer to map. */ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ - hpsa_scsi_do_simple_cmd_core(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) { + dev_warn(&h->pdev->dev, "Failed to send reset command\n"); + goto out; + } /* no unmap needed here because no data xfer. */ ei = c->err_info; @@ -2219,10 +2639,129 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, hpsa_scsi_interpret_error(h, c); rc = -1; } +out: cmd_free(h, c); return rc; } +static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, + struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr) +{ + int i; + bool match = false; + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; + + if (hpsa_is_cmd_idle(c)) + return false; + + switch (c->cmd_type) { + case CMD_SCSI: + case CMD_IOCTL_PEND: + match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, + sizeof(c->Header.LUN.LunAddrBytes)); + break; + + case CMD_IOACCEL1: + case CMD_IOACCEL2: + if (c->phys_disk == dev) { + /* HBA mode match */ + match = true; + } else { + /* Possible RAID mode -- check each phys dev. */ + /* FIXME: Do we need to take out a lock here? If + * so, we could just call hpsa_get_pdisk_of_ioaccel2() + * instead. */ + for (i = 0; i < dev->nphysical_disks && !match; i++) { + /* FIXME: an alternate test might be + * + * match = dev->phys_disk[i]->ioaccel_handle + * == c2->scsi_nexus; */ + match = dev->phys_disk[i] == c->phys_disk; + } + } + break; + + case IOACCEL2_TMF: + for (i = 0; i < dev->nphysical_disks && !match; i++) { + match = dev->phys_disk[i]->ioaccel_handle == + le32_to_cpu(ac->it_nexus); + } + break; + + case 0: /* The command is in the middle of being initialized. 
*/ + match = false; + break; + + default: + dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", + c->cmd_type); + BUG(); + } + + return match; +} + +static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr, u8 reset_type, int reply_queue) +{ + int i; + int rc = 0; + + /* We can really only handle one reset at a time */ + if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { + dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); + return -EINTR; + } + + BUG_ON(atomic_read(&dev->reset_cmds_out) != 0); + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + int refcount = atomic_inc_return(&c->refcount); + + if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { + unsigned long flags; + + /* + * Mark the target command as having a reset pending, + * then lock a lock so that the command cannot complete + * while we're considering it. If the command is not + * idle then count it; otherwise revoke the event. + */ + c->reset_pending = dev; + spin_lock_irqsave(&h->lock, flags); /* Implied MB */ + if (!hpsa_is_cmd_idle(c)) + atomic_inc(&dev->reset_cmds_out); + else + c->reset_pending = NULL; + spin_unlock_irqrestore(&h->lock, flags); + } + + cmd_free(h, c); + } + + rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue); + if (!rc) + wait_event(h->event_sync_wait_queue, + atomic_read(&dev->reset_cmds_out) == 0 || + lockup_detected(h)); + + if (unlikely(lockup_detected(h))) { + dev_warn(&h->pdev->dev, + "Controller lockup detected during reset wait\n"); + mutex_unlock(&h->reset_mutex); + rc = -ENODEV; + } + + if (unlikely(rc)) + atomic_set(&dev->reset_cmds_out, 0); + + mutex_unlock(&h->reset_mutex); + return rc; +} + static void hpsa_get_raid_level(struct ctlr_info *h, unsigned char *scsi3addr, unsigned char *raid_level) { @@ -2328,23 +2867,23 @@ static int hpsa_get_raid_map(struct ctlr_info *h, struct ErrorInfo *ei; c = cmd_alloc(h); - if (c == NULL) { - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } + if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, sizeof(this_device->raid_map), 0, scsi3addr, TYPE_CMD)) { - dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); + dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); cmd_free(h, c); - return -ENOMEM; + return -1; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); - cmd_free(h, c); - return -1; + rc = -1; + goto out; } cmd_free(h, c); @@ -2356,6 +2895,9 @@ static int hpsa_get_raid_map(struct ctlr_info *h, } hpsa_debug_map_buff(h, rc, &this_device->raid_map); return rc; +out: + cmd_free(h, c); + return rc; } static int hpsa_bmic_id_physical_device(struct ctlr_info *h, @@ -2375,7 +2917,8 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h, c->Request.CDB[2] = bmic_device_index & 0xff; c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, + NO_TIMEOUT); ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); @@ -2438,6 +2981,7 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, this_device->offload_config = 0; this_device->offload_enabled = 
0; + this_device->offload_to_be_enabled = 0; buf = kzalloc(64, GFP_KERNEL); if (!buf) @@ -2461,6 +3005,7 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, if (hpsa_get_raid_map(h, scsi3addr, this_device)) this_device->offload_enabled = 0; } + this_device->offload_to_be_enabled = this_device->offload_enabled; out: kfree(buf); return; @@ -2495,10 +3040,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, struct ErrorInfo *ei; c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -1; - } + /* address the controller */ memset(scsi3addr, 0, sizeof(scsi3addr)); if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, @@ -2508,7 +3050,10 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, } if (extended_response) c->Request.CDB[1] = extended_response; - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { @@ -2600,8 +3145,10 @@ static int hpsa_volume_offline(struct ctlr_info *h, unsigned char scsi3addr[]) { struct CommandList *c; - unsigned char *sense, sense_key, asc, ascq; - int ldstat = 0; + unsigned char *sense; + u8 sense_key, asc, ascq; + int sense_len; + int rc, ldstat = 0; u16 cmd_status; u8 scsi_status; #define ASC_LUN_NOT_READY 0x04 @@ -2609,14 +3156,19 @@ static int hpsa_volume_offline(struct ctlr_info *h, #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 c = cmd_alloc(h); - if (!c) - return 0; + (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); - hpsa_scsi_do_simple_cmd_core(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + if (rc) { + cmd_free(h, c); + return 0; + } sense = c->err_info->SenseInfo; - sense_key = sense[2]; - asc = sense[12]; - ascq = sense[13]; + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) + sense_len = sizeof(c->err_info->SenseInfo); + else + sense_len = c->err_info->SenseLen; + decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq); cmd_status = c->err_info->CommandStatus; scsi_status = c->err_info->ScsiStatus; cmd_free(h, c); @@ -2656,6 +3208,52 @@ static int hpsa_volume_offline(struct ctlr_info *h, return 0; } +/* + * Find out if a logical device supports aborts by simply trying one. + * Smart Array may claim not to support aborts on logical drives, but + * if a MSA2000 * is connected, the drives on that will be presented + * by the Smart Array as logical drives, and aborts may be sent to + * those devices successfully. So the simplest way to find out is + * to simply try an abort and see how the device responds. + */ +static int hpsa_device_supports_aborts(struct ctlr_info *h, + unsigned char *scsi3addr) +{ + struct CommandList *c; + struct ErrorInfo *ei; + int rc = 0; + + u64 tag = (u64) -1; /* bogus tag */ + + /* Assume that physical devices support aborts */ + if (!is_logical_dev_addr_mode(scsi3addr)) + return 1; + + c = cmd_alloc(h); + + (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); + (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + /* no unmap needed here because no data xfer. 
*/ + ei = c->err_info; + switch (ei->CommandStatus) { + case CMD_INVALID: + rc = 0; + break; + case CMD_UNABORTABLE: + case CMD_ABORT_FAILED: + rc = 1; + break; + case CMD_TMF_STATUS: + rc = hpsa_evaluate_tmf_status(h, c); + break; + default: + rc = 0; + break; + } + cmd_free(h, c); + return rc; +} + static int hpsa_update_device_info(struct ctlr_info *h, unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, unsigned char *is_OBDR_device) @@ -2708,6 +3306,8 @@ static int hpsa_update_device_info(struct ctlr_info *h, this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; this_device->offload_enabled = 0; + this_device->offload_to_be_enabled = 0; + this_device->hba_ioaccel_enabled = 0; this_device->volume_offline = 0; this_device->queue_depth = h->nr_cmds; } @@ -2721,7 +3321,6 @@ static int hpsa_update_device_info(struct ctlr_info *h, strncmp(obdr_sig, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0); } - kfree(inq_buff); return 0; @@ -2730,6 +3329,31 @@ bail_out: return 1; } +static void hpsa_update_device_supports_aborts(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev, u8 *scsi3addr) +{ + unsigned long flags; + int rc, entry; + /* + * See if this device supports aborts. If we already know + * the device, we already know if it supports aborts, otherwise + * we have to find out if it supports aborts by trying one. + */ + spin_lock_irqsave(&h->devlock, flags); + rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry); + if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) && + entry >= 0 && entry < h->ndevices) { + dev->supports_aborts = h->dev[entry]->supports_aborts; + spin_unlock_irqrestore(&h->devlock, flags); + } else { + spin_unlock_irqrestore(&h->devlock, flags); + dev->supports_aborts = + hpsa_device_supports_aborts(h, scsi3addr); + if (dev->supports_aborts < 0) + dev->supports_aborts = 0; + } +} + static unsigned char *ext_target_model[] = { "MSA2012", "MSA2024", @@ -2835,6 +3459,7 @@ static int add_ext_target_dev(struct ctlr_info *h, (*n_ext_target_devs)++; hpsa_set_bus_target_lun(this_device, tmpdevice->bus, tmpdevice->target, 0); + hpsa_update_device_supports_aborts(h, this_device, scsi3addr); set_bit(tmpdevice->target, lunzerobits); return 1; } @@ -2850,88 +3475,23 @@ static int add_ext_target_dev(struct ctlr_info *h, static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) { - struct ReportExtendedLUNdata *physicals = NULL; - int responsesize = 24; /* size of physical extended response */ - int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; - u32 nphysicals = 0; /* number of reported physical devs */ - int found = 0; /* found match (1) or not (0) */ - u32 find; /* handle we need to match */ + struct io_accel2_cmd *c2 = + &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; + unsigned long flags; int i; - struct scsi_cmnd *scmd; /* scsi command within request being aborted */ - struct hpsa_scsi_dev_t *d; /* device of request being aborted */ - struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ - __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ - __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ - - if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) - return 0; /* no match */ - - /* point to the ioaccel2 device handle */ - c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; - if (c2a == NULL) - return 0; /* no match */ - - scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; - if (scmd == NULL) - return 0; /* no 
match */ - - d = scmd->device->hostdata; - if (d == NULL) - return 0; /* no match */ - - it_nexus = cpu_to_le32(d->ioaccel_handle); - scsi_nexus = c2a->scsi_nexus; - find = le32_to_cpu(c2a->scsi_nexus); - - if (h->raid_offload_debug > 0) - dev_info(&h->pdev->dev, - "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", - __func__, scsi_nexus, - d->device_id[0], d->device_id[1], d->device_id[2], - d->device_id[3], d->device_id[4], d->device_id[5], - d->device_id[6], d->device_id[7], d->device_id[8], - d->device_id[9], d->device_id[10], d->device_id[11], - d->device_id[12], d->device_id[13], d->device_id[14], - d->device_id[15]); - - /* Get the list of physical devices */ - physicals = kzalloc(reportsize, GFP_KERNEL); - if (physicals == NULL) - return 0; - if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) { - dev_err(&h->pdev->dev, - "Can't lookup %s device handle: report physical LUNs failed.\n", - "HP SSD Smart Path"); - kfree(physicals); - return 0; - } - nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / - responsesize; - - /* find ioaccel2 handle in list of physicals: */ - for (i = 0; i < nphysicals; i++) { - struct ext_report_lun_entry *entry = &physicals->LUN[i]; - - /* handle is in bytes 28-31 of each lun */ - if (entry->ioaccel_handle != find) - continue; /* didn't match */ - found = 1; - memcpy(scsi3addr, entry->lunid, 8); - if (h->raid_offload_debug > 0) - dev_info(&h->pdev->dev, - "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n", - __func__, find, - entry->ioaccel_handle, scsi3addr); - break; /* found it */ - } - - kfree(physicals); - if (found) - return 1; - else - return 0; + spin_lock_irqsave(&h->devlock, flags); + for (i = 0; i < h->ndevices; i++) + if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) { + memcpy(scsi3addr, h->dev[i]->scsi3addr, + sizeof(h->dev[i]->scsi3addr)); + spin_unlock_irqrestore(&h->devlock, flags); + return 1; + } + spin_unlock_irqrestore(&h->devlock, flags); + return 0; } + /* * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, * logdev. The number of luns in physdev and logdev are returned in @@ -3036,6 +3596,8 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, (struct ext_report_lun_entry *) lunaddrbytes; dev->ioaccel_handle = rle->ioaccel_handle; + if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle) + dev->hba_ioaccel_enabled = 1; memset(id_phys, 0, sizeof(*id_phys)); rc = hpsa_bmic_id_physical_device(h, lunaddrbytes, GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys, @@ -3050,6 +3612,7 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, else dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ atomic_set(&dev->ioaccel_cmds_out, 0); + atomic_set(&dev->reset_cmds_out, 0); } static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) @@ -3142,16 +3705,19 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) /* Figure out where the LUN ID info is coming from */ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, i, nphysicals, nlogicals, physdev_list, logdev_list); - /* skip masked physical devices. 
*/ - if (lunaddrbytes[3] & 0xC0 && - i < nphysicals + (raid_ctlr_position == 0)) - continue; + + /* skip masked non-disk devices */ + if (MASKED_DEVICE(lunaddrbytes)) + if (i < nphysicals + (raid_ctlr_position == 0) && + NON_DISK_PHYS_DEV(lunaddrbytes)) + continue; /* Get device type, vendor, model, device id */ if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, &is_OBDR)) continue; /* skip it if we can't talk to it. */ figure_bus_target_lun(h, lunaddrbytes, tmpdevice); + hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes); this_device = currentsd[ncurrent]; /* @@ -3170,6 +3736,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) *this_device = *tmpdevice; + /* do not expose masked devices */ + if (MASKED_DEVICE(lunaddrbytes) && + i < nphysicals + (raid_ctlr_position == 0)) { + if (h->hba_mode_enabled) + dev_warn(&h->pdev->dev, + "Masked physical device detected\n"); + this_device->expose_state = HPSA_DO_NOT_EXPOSE; + } else { + this_device->expose_state = + HPSA_SG_ATTACH | HPSA_ULD_ATTACH; + } + switch (this_device->devtype) { case TYPE_ROM: /* We don't *really* support actual CD-ROM devices, @@ -3183,34 +3761,31 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) ncurrent++; break; case TYPE_DISK: - if (h->hba_mode_enabled) { - /* never use raid mapper in HBA mode */ - this_device->offload_enabled = 0; - ncurrent++; - break; - } else if (h->acciopath_status) { - if (i >= nphysicals) { - ncurrent++; - break; - } - } else { - if (i < nphysicals) - break; + if (i >= nphysicals) { ncurrent++; break; } - if (h->transMethod & CFGTBL_Trans_io_accel1 || - h->transMethod & CFGTBL_Trans_io_accel2) { - hpsa_get_ioaccel_drive_info(h, this_device, - lunaddrbytes, id_phys); - atomic_set(&this_device->ioaccel_cmds_out, 0); - ncurrent++; - } + + if (h->hba_mode_enabled) + /* never use raid mapper in HBA mode */ + this_device->offload_enabled = 0; + else if (!(h->transMethod & CFGTBL_Trans_io_accel1 || + h->transMethod & CFGTBL_Trans_io_accel2)) + break; + + hpsa_get_ioaccel_drive_info(h, this_device, + lunaddrbytes, id_phys); + atomic_set(&this_device->ioaccel_cmds_out, 0); + ncurrent++; break; case TYPE_TAPE: case TYPE_MEDIUM_CHANGER: ncurrent++; break; + case TYPE_ENCLOSURE: + if (h->hba_mode_enabled) + ncurrent++; + break; case TYPE_RAID: /* Only present the Smartarray HBA as a RAID controller. * If it's a RAID controller other than the HBA itself @@ -3227,7 +3802,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) if (ncurrent >= HPSA_MAX_DEVICES) break; } - hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent); adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); out: kfree(tmpdevice); @@ -3260,7 +3834,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, struct scsi_cmnd *cmd) { struct scatterlist *sg; - int use_sg, i, sg_index, chained; + int use_sg, i, sg_limit, chained, last_sg; struct SGDescriptor *curr_sg; BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); @@ -3272,22 +3846,39 @@ static int hpsa_scatter_gather(struct ctlr_info *h, if (!use_sg) goto sglist_finished; + /* + * If the number of entries is greater than the max for a single list, + * then we have a chained list; we will set up all but one entry in the + * first list (the last entry is saved for link information); + * otherwise, we don't have a chained list and we'll set up at each of + * the entries in the one list. 
+ */ curr_sg = cp->SG; - chained = 0; - sg_index = 0; - scsi_for_each_sg(cmd, sg, use_sg, i) { - if (i == h->max_cmd_sg_entries - 1 && - use_sg > h->max_cmd_sg_entries) { - chained = 1; - curr_sg = h->cmd_sg_list[cp->cmdindex]; - sg_index = 0; - } + chained = use_sg > h->max_cmd_sg_entries; + sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg; + last_sg = scsi_sg_count(cmd) - 1; + scsi_for_each_sg(cmd, sg, sg_limit, i) { hpsa_set_sg_descriptor(curr_sg, sg); curr_sg++; } + if (chained) { + /* + * Continue with the chained list. Set curr_sg to the chained + * list. Modify the limit to the total count less the entries + * we've already set up. Resume the scan at the list entry + * where the previous loop left off. + */ + curr_sg = h->cmd_sg_list[cp->cmdindex]; + sg_limit = use_sg - sg_limit; + for_each_sg(sg, sg, sg_limit, i) { + hpsa_set_sg_descriptor(curr_sg, sg); + curr_sg++; + } + } + /* Back the pointer up to the last entry and mark it as "last". */ - (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); + (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST); if (use_sg + chained > h->maxSG) h->maxSG = use_sg + chained; @@ -3530,10 +4121,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, u32 len; u32 total_len = 0; - if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { - atomic_dec(&phys_disk->ioaccel_cmds_out); - return IO_ACCEL_INELIGIBLE; - } + BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); if (fixup_ioaccel_cdb(cdb, &cdb_len)) { atomic_dec(&phys_disk->ioaccel_cmds_out); @@ -3556,8 +4144,19 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, } if (use_sg) { - BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); curr_sg = cp->sg; + if (use_sg > h->ioaccel_maxsg) { + addr64 = le64_to_cpu( + h->ioaccel2_cmd_sg_list[c->cmdindex]->address); + curr_sg->address = cpu_to_le64(addr64); + curr_sg->length = 0; + curr_sg->reserved[0] = 0; + curr_sg->reserved[1] = 0; + curr_sg->reserved[2] = 0; + curr_sg->chain_indicator = 0x80; + + curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; + } scsi_for_each_sg(cmd, sg, use_sg, i) { addr64 = (u64) sg_dma_address(sg); len = sg_dma_len(sg); @@ -3602,14 +4201,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); memcpy(cp->cdb, cdb, sizeof(cp->cdb)); - /* fill in sg elements */ - cp->sg_count = (u8) use_sg; - cp->data_len = cpu_to_le32(total_len); cp->err_ptr = cpu_to_le64(c->busaddr + offsetof(struct io_accel2_cmd, error_data)); cp->err_len = cpu_to_le32(sizeof(cp->error_data)); + /* fill in sg elements */ + if (use_sg > h->ioaccel_maxsg) { + cp->sg_count = 1; + if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + scsi_dma_unmap(cmd); + return -1; + } + } else + cp->sg_count = (u8) use_sg; + enqueue_cmd_and_start_io(h, c); return 0; } @@ -3992,7 +4599,11 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, dev->phys_disk[map_index]); } -/* Submit commands down the "normal" RAID stack path */ +/* + * Submit commands down the "normal" RAID stack path + * All callers to hpsa_ciss_submit must check lockup_detected + * beforehand, before (opt.) and after calling cmd_alloc + */ static int hpsa_ciss_submit(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, unsigned char scsi3addr[]) @@ -4007,7 +4618,6 @@ static int hpsa_ciss_submit(struct ctlr_info *h, /* Fill in the request block... 
*/ c->Request.Timeout = 0; - memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); c->Request.CDBLen = cmd->cmd_len; memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); @@ -4050,7 +4660,7 @@ static int hpsa_ciss_submit(struct ctlr_info *h, } if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ - cmd_free(h, c); + hpsa_cmd_resolve_and_free(h, c); return SCSI_MLQUEUE_HOST_BUSY; } enqueue_cmd_and_start_io(h, c); @@ -4058,25 +4668,125 @@ static int hpsa_ciss_submit(struct ctlr_info *h, return 0; } -static void hpsa_command_resubmit_worker(struct work_struct *work) +static void hpsa_cmd_init(struct ctlr_info *h, int index, + struct CommandList *c) { - struct scsi_cmnd *cmd; - struct hpsa_scsi_dev_t *dev; - struct CommandList *c = - container_of(work, struct CommandList, work); + dma_addr_t cmd_dma_handle, err_dma_handle; - cmd = c->scsi_cmd; - dev = cmd->device->hostdata; - if (!dev) { - cmd->result = DID_NO_CONNECT << 16; - cmd->scsi_done(cmd); - return; + /* Zero out all of commandlist except the last field, refcount */ + memset(c, 0, offsetof(struct CommandList, refcount)); + c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); + cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); + c->err_info = h->errinfo_pool + index; + memset(c->err_info, 0, sizeof(*c->err_info)); + err_dma_handle = h->errinfo_pool_dhandle + + index * sizeof(*c->err_info); + c->cmdindex = index; + c->busaddr = (u32) cmd_dma_handle; + c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); + c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); + c->h = h; + c->scsi_cmd = SCSI_CMD_IDLE; +} + +static void hpsa_preinitialize_commands(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + + hpsa_cmd_init(h, i, c); + atomic_set(&c->refcount, 0); + } +} + +static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, + struct CommandList *c) +{ + dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); + + BUG_ON(c->cmdindex != index); + + memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); + memset(c->err_info, 0, sizeof(*c->err_info)); + c->busaddr = (u32) cmd_dma_handle; +} + +static int hpsa_ioaccel_submit(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + unsigned char *scsi3addr) +{ + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + int rc = IO_ACCEL_INELIGIBLE; + + cmd->host_scribble = (unsigned char *) c; + + if (dev->offload_enabled) { + hpsa_cmd_init(h, c->cmdindex, c); + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + rc = hpsa_scsi_ioaccel_raid_map(h, c); + if (rc < 0) /* scsi_dma_map failed. */ + rc = SCSI_MLQUEUE_HOST_BUSY; + } else if (dev->hba_ioaccel_enabled) { + hpsa_cmd_init(h, c->cmdindex, c); + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + rc = hpsa_scsi_ioaccel_direct_map(h, c); + if (rc < 0) /* scsi_dma_map failed. 
*/ + rc = SCSI_MLQUEUE_HOST_BUSY; + } + return rc; +} + +static void hpsa_command_resubmit_worker(struct work_struct *work) +{ + struct scsi_cmnd *cmd; + struct hpsa_scsi_dev_t *dev; + struct CommandList *c = container_of(work, struct CommandList, work); + + cmd = c->scsi_cmd; + dev = cmd->device->hostdata; + if (!dev) { + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(c->h, c, cmd); + } + if (c->reset_pending) + return hpsa_cmd_resolve_and_free(c->h, c); + if (c->abort_pending) + return hpsa_cmd_abort_and_free(c->h, c, cmd); + if (c->cmd_type == CMD_IOACCEL2) { + struct ctlr_info *h = c->h; + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + int rc; + + if (c2->error_data.serv_response == + IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { + rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); + if (rc == 0) + return; + if (rc == SCSI_MLQUEUE_HOST_BUSY) { + /* + * If we get here, it means dma mapping failed. + * Try again via scsi mid layer, which will + * then get SCSI_MLQUEUE_HOST_BUSY. + */ + cmd->result = DID_IMM_RETRY << 16; + return hpsa_cmd_free_and_done(h, c, cmd); + } + /* else, fall thru and resubmit down CISS path */ + } } + hpsa_cmd_partial_init(c->h, c->cmdindex, c); if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { /* * If we get here, it means dma mapping failed. Try * again via scsi mid layer, which will then get * SCSI_MLQUEUE_HOST_BUSY. + * + * hpsa_ciss_submit will have already freed c + * if it encountered a dma mapping failure. */ cmd->result = DID_IMM_RETRY << 16; cmd->scsi_done(cmd); @@ -4094,30 +4804,24 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) /* Get the ptr to our adapter structure out of cmd->host. */ h = sdev_to_hba(cmd->device); + + BUG_ON(cmd->request->tag < 0); + dev = cmd->device->hostdata; if (!dev) { cmd->result = DID_NO_CONNECT << 16; cmd->scsi_done(cmd); return 0; } + memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); if (unlikely(lockup_detected(h))) { - cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); - return 0; - } - c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return SCSI_MLQUEUE_HOST_BUSY; - } - if (unlikely(lockup_detected(h))) { - cmd->result = DID_ERROR << 16; - cmd_free(h, c); + cmd->result = DID_NO_CONNECT << 16; cmd->scsi_done(cmd); return 0; } + c = cmd_tagged_alloc(h, cmd); /* * Call alternate submit routine for I/O accelerated commands. @@ -4126,27 +4830,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) if (likely(cmd->retries == 0 && cmd->request->cmd_type == REQ_TYPE_FS && h->acciopath_status)) { - - cmd->host_scribble = (unsigned char *) c; - c->cmd_type = CMD_SCSI; - c->scsi_cmd = cmd; - - if (dev->offload_enabled) { - rc = hpsa_scsi_ioaccel_raid_map(h, c); - if (rc == 0) - return 0; /* Sent on ioaccel path */ - if (rc < 0) { /* scsi_dma_map failed. */ - cmd_free(h, c); - return SCSI_MLQUEUE_HOST_BUSY; - } - } else if (dev->ioaccel_handle) { - rc = hpsa_scsi_ioaccel_direct_map(h, c); - if (rc == 0) - return 0; /* Sent on direct map path */ - if (rc < 0) { /* scsi_dma_map failed. 
*/ - cmd_free(h, c); - return SCSI_MLQUEUE_HOST_BUSY; - } + rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); + if (rc == 0) + return 0; + if (rc == SCSI_MLQUEUE_HOST_BUSY) { + hpsa_cmd_resolve_and_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; } } return hpsa_ciss_submit(h, c, cmd, scsi3addr); @@ -4228,22 +4917,16 @@ static int hpsa_scan_finished(struct Scsi_Host *sh, return finished; } -static void hpsa_unregister_scsi(struct ctlr_info *h) -{ - /* we are being forcibly unloaded, and may not refuse. */ - scsi_remove_host(h->scsi_host); - scsi_host_put(h->scsi_host); - h->scsi_host = NULL; -} - -static int hpsa_register_scsi(struct ctlr_info *h) +static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; int error; sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); - if (sh == NULL) - goto fail; + if (sh == NULL) { + dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); + return -ENOMEM; + } sh->io_port = 0; sh->n_io_port = 0; @@ -4252,80 +4935,156 @@ static int hpsa_register_scsi(struct ctlr_info *h) sh->max_cmd_len = MAX_COMMAND_SIZE; sh->max_lun = HPSA_MAX_LUN; sh->max_id = HPSA_MAX_LUN; - sh->can_queue = h->nr_cmds - - HPSA_CMDS_RESERVED_FOR_ABORTS - - HPSA_CMDS_RESERVED_FOR_DRIVER - - HPSA_MAX_CONCURRENT_PASSTHRUS; + sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; sh->cmd_per_lun = sh->can_queue; sh->sg_tablesize = h->maxsgentries; - h->scsi_host = sh; sh->hostdata[0] = (unsigned long) h; sh->irq = h->intr[h->intr_mode]; sh->unique_id = sh->irq; - error = scsi_add_host(sh, &h->pdev->dev); - if (error) - goto fail_host_put; - scsi_scan_host(sh); + error = scsi_init_shared_tag_map(sh, sh->can_queue); + if (error) { + dev_err(&h->pdev->dev, + "%s: scsi_init_shared_tag_map failed for controller %d\n", + __func__, h->ctlr); + scsi_host_put(sh); + return error; + } + h->scsi_host = sh; return 0; +} - fail_host_put: - dev_err(&h->pdev->dev, "%s: scsi_add_host" - " failed for controller %d\n", __func__, h->ctlr); - scsi_host_put(sh); - return error; - fail: - dev_err(&h->pdev->dev, "%s: scsi_host_alloc" - " failed for controller %d\n", __func__, h->ctlr); - return -ENOMEM; +static int hpsa_scsi_add_host(struct ctlr_info *h) +{ + int rv; + + rv = scsi_add_host(h->scsi_host, &h->pdev->dev); + if (rv) { + dev_err(&h->pdev->dev, "scsi_add_host failed\n"); + return rv; + } + scsi_scan_host(h->scsi_host); + return 0; } -static int wait_for_device_to_become_ready(struct ctlr_info *h, - unsigned char lunaddr[]) +/* + * The block layer has already gone to the trouble of picking out a unique, + * small-integer tag for this request. We use an offset from that value as + * an index to select our command block. (The offset allows us to reserve the + * low-numbered entries for our own uses.) + */ +static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) +{ + int idx = scmd->request->tag; + + if (idx < 0) + return idx; + + /* Offset to leave space for internal cmds. */ + return idx += HPSA_NRESERVED_CMDS; +} + +/* + * Send a TEST_UNIT_READY command to the specified LUN using the specified + * reply queue; returns zero if the unit is ready, and non-zero otherwise. + */ +static int hpsa_send_test_unit_ready(struct ctlr_info *h, + struct CommandList *c, unsigned char lunaddr[], + int reply_queue) +{ + int rc; + + /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ + (void) fill_cmd(c, TEST_UNIT_READY, h, + NULL, 0, 0, lunaddr, TYPE_CMD); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) + return rc; + /* no unmap needed here because no data xfer. 
*/ + + /* Check if the unit is already ready. */ + if (c->err_info->CommandStatus == CMD_SUCCESS) + return 0; + + /* + * The first command sent after reset will receive "unit attention" to + * indicate that the LUN has been reset...this is actually what we're + * looking for (but, success is good too). + */ + if (c->err_info->CommandStatus == CMD_TARGET_STATUS && + c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && + (c->err_info->SenseInfo[2] == NO_SENSE || + c->err_info->SenseInfo[2] == UNIT_ATTENTION)) + return 0; + + return 1; +} + +/* + * Wait for a TEST_UNIT_READY command to complete, retrying as necessary; + * returns zero when the unit is ready, and non-zero when giving up. + */ +static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, + struct CommandList *c, + unsigned char lunaddr[], int reply_queue) { int rc; int count = 0; int waittime = 1; /* seconds */ - struct CommandList *c; - - c = cmd_alloc(h); - if (!c) { - dev_warn(&h->pdev->dev, "out of memory in " - "wait_for_device_to_become_ready.\n"); - return IO_ERROR; - } /* Send test unit ready until device ready, or give up. */ - while (count < HPSA_TUR_RETRY_LIMIT) { + for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) { - /* Wait for a bit. do this first, because if we send + /* + * Wait for a bit. do this first, because if we send * the TUR right away, the reset will just abort it. */ msleep(1000 * waittime); - count++; - rc = 0; /* Device ready. */ + + rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); + if (!rc) + break; /* Increase wait time with each try, up to a point. */ if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) - waittime = waittime * 2; + waittime *= 2; - /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ - (void) fill_cmd(c, TEST_UNIT_READY, h, - NULL, 0, 0, lunaddr, TYPE_CMD); - hpsa_scsi_do_simple_cmd_core(h, c); - /* no unmap needed here because no data xfer. */ + dev_warn(&h->pdev->dev, + "waiting %d secs for device to become ready.\n", + waittime); + } - if (c->err_info->CommandStatus == CMD_SUCCESS) - break; + return rc; +} - if (c->err_info->CommandStatus == CMD_TARGET_STATUS && - c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && - (c->err_info->SenseInfo[2] == NO_SENSE || - c->err_info->SenseInfo[2] == UNIT_ATTENTION)) - break; +static int wait_for_device_to_become_ready(struct ctlr_info *h, + unsigned char lunaddr[], + int reply_queue) +{ + int first_queue; + int last_queue; + int rq; + int rc = 0; + struct CommandList *c; + + c = cmd_alloc(h); - dev_warn(&h->pdev->dev, "waiting %d secs " - "for device to become ready.\n", waittime); - rc = 1; /* device not ready. */ + /* + * If no specific reply queue was requested, then send the TUR + * repeatedly, requesting a reply on each reply queue; otherwise execute + * the loop exactly once using only the specified queue. 
+ */ + if (reply_queue == DEFAULT_REPLY_QUEUE) { + first_queue = 0; + last_queue = h->nreply_queues - 1; + } else { + first_queue = reply_queue; + last_queue = reply_queue; + } + + for (rq = first_queue; rq <= last_queue; rq++) { + rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); + if (rc) + break; } if (rc) @@ -4345,6 +5104,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) int rc; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; + char msg[40]; /* find the controller to which the command to be aborted was sent */ h = sdev_to_hba(scsicmd->device); @@ -4356,19 +5116,38 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) dev = scsicmd->device->hostdata; if (!dev) { - dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " - "device lookup failed.\n"); + dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); return FAILED; } - dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", - h->scsi_host->host_no, dev->bus, dev->target, dev->lun); - /* send a reset to the SCSI LUN which the command was sent to */ - rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); - if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) + + /* if controller locked up, we can guarantee command won't complete */ + if (lockup_detected(h)) { + sprintf(msg, "cmd %d RESET FAILED, lockup detected", + hpsa_get_cmd_index(scsicmd)); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + return FAILED; + } + + /* this reset request might be the result of a lockup; check */ + if (detect_controller_lockup(h)) { + sprintf(msg, "cmd %d RESET FAILED, new lockup detected", + hpsa_get_cmd_index(scsicmd)); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + return FAILED; + } + + /* Do not attempt on controller */ + if (is_hba_lunid(dev->scsi3addr)) return SUCCESS; - dev_warn(&h->pdev->dev, "resetting device failed.\n"); - return FAILED; + hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting"); + + /* send a reset to the SCSI LUN which the command was sent to */ + rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN, + DEFAULT_REPLY_QUEUE); + sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed"); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + return rc == 0 ? SUCCESS : FAILED; } static void swizzle_abort_tag(u8 *tag) @@ -4412,7 +5191,7 @@ static void hpsa_get_tag(struct ctlr_info *h, } static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, - struct CommandList *abort, int swizzle) + struct CommandList *abort, int reply_queue) { int rc = IO_OK; struct CommandList *c; @@ -4420,19 +5199,15 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, __le32 tagupper, taglower; c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } /* fill_cmd can't fail here, no buffer to map */ - (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, + (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag, 0, 0, scsi3addr, TYPE_MSG); - if (swizzle) + if (h->needs_abort_tags_swizzled) swizzle_abort_tag(&c->Request.CDB[4]); - hpsa_scsi_do_simple_cmd_core(h, c); + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); hpsa_get_tag(h, abort, &taglower, &tagupper); - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", + dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", __func__, tagupper, taglower); /* no unmap needed here because no data xfer. 
*/ @@ -4440,6 +5215,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, switch (ei->CommandStatus) { case CMD_SUCCESS: break; + case CMD_TMF_STATUS: + rc = hpsa_evaluate_tmf_status(h, c); + break; case CMD_UNABORTABLE: /* Very common, don't make noise. */ rc = -1; break; @@ -4456,6 +5234,48 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, return rc; } +static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h, + struct CommandList *command_to_abort, int reply_queue) +{ + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; + struct io_accel2_cmd *c2a = + &h->ioaccel2_cmd_pool[command_to_abort->cmdindex]; + struct scsi_cmnd *scmd = command_to_abort->scsi_cmd; + struct hpsa_scsi_dev_t *dev = scmd->device->hostdata; + + /* + * We're overlaying struct hpsa_tmf_struct on top of something which + * was allocated as a struct io_accel2_cmd, so we better be sure it + * actually fits, and doesn't overrun the error info space. + */ + BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) > + sizeof(struct io_accel2_cmd)); + BUG_ON(offsetof(struct io_accel2_cmd, error_data) < + offsetof(struct hpsa_tmf_struct, error_len) + + sizeof(ac->error_len)); + + c->cmd_type = IOACCEL2_TMF; + c->scsi_cmd = SCSI_CMD_BUSY; + + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + + (c->cmdindex * sizeof(struct io_accel2_cmd)); + BUG_ON(c->busaddr & 0x0000007F); + + memset(ac, 0, sizeof(*c2)); /* yes this is correct */ + ac->iu_type = IOACCEL2_IU_TMF_TYPE; + ac->reply_queue = reply_queue; + ac->tmf = IOACCEL2_TMF_ABORT; + ac->it_nexus = cpu_to_le32(dev->ioaccel_handle); + memset(ac->lun_id, 0, sizeof(ac->lun_id)); + ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT); + ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag)); + ac->error_ptr = cpu_to_le64(c->busaddr + + offsetof(struct io_accel2_cmd, error_data)); + ac->error_len = cpu_to_le32(sizeof(c2->error_data)); +} + /* ioaccel2 path firmware cannot handle abort task requests. * Change abort requests to physical target reset, and send to the * address of the physical disk used for the ioaccel 2 command. 
@@ -4464,7 +5284,7 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, */ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, - unsigned char *scsi3addr, struct CommandList *abort) + unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) { int rc = IO_OK; struct scsi_cmnd *scmd; /* scsi command within request being aborted */ @@ -4483,8 +5303,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, if (h->raid_offload_debug > 0) dev_info(&h->pdev->dev, - "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", h->scsi_host->host_no, dev->bus, dev->target, dev->lun, + "Reset as abort", scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); @@ -4506,7 +5327,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", psa[0], psa[1], psa[2], psa[3], psa[4], psa[5], psa[6], psa[7]); - rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); + rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue); if (rc != 0) { dev_warn(&h->pdev->dev, "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", @@ -4516,7 +5337,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, } /* wait for device to recover */ - if (wait_for_device_to_become_ready(h, psa) != 0) { + if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { dev_warn(&h->pdev->dev, "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", psa[0], psa[1], psa[2], psa[3], @@ -4533,25 +5354,94 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, return rc; /* success */ } -/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to - * tell which kind we're dealing with, so we send the abort both ways. There - * shouldn't be any collisions between swizzled and unswizzled tags due to the - * way we construct our tags but we check anyway in case the assumptions which - * make this true someday become false. - */ +static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, + struct CommandList *abort, int reply_queue) +{ + int rc = IO_OK; + struct CommandList *c; + __le32 taglower, tagupper; + struct hpsa_scsi_dev_t *dev; + struct io_accel2_cmd *c2; + + dev = abort->scsi_cmd->device->hostdata; + if (!dev->offload_enabled && !dev->hba_ioaccel_enabled) + return -1; + + c = cmd_alloc(h); + setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); + c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + hpsa_get_tag(h, abort, &taglower, &tagupper); + dev_dbg(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", + __func__, tagupper, taglower); + /* no unmap needed here because no data xfer. 
*/ + + dev_dbg(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n", + __func__, tagupper, taglower, c2->error_data.serv_response); + switch (c2->error_data.serv_response) { + case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: + case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: + rc = 0; + break; + case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: + case IOACCEL2_SERV_RESPONSE_FAILURE: + case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: + rc = -1; + break; + default: + dev_warn(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n", + __func__, tagupper, taglower, + c2->error_data.serv_response); + rc = -1; + } + cmd_free(h, c); + dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, + tagupper, taglower); + return rc; +} + static int hpsa_send_abort_both_ways(struct ctlr_info *h, - unsigned char *scsi3addr, struct CommandList *abort) + unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) { - /* ioccelerator mode 2 commands should be aborted via the + /* + * ioccelerator mode 2 commands should be aborted via the * accelerated path, since RAID path is unaware of these commands, - * but underlying firmware can't handle abort TMF. - * Change abort to physical device reset. + * but not all underlying firmware can handle abort TMF. + * Change abort to physical device reset when abort TMF is unsupported. */ - if (abort->cmd_type == CMD_IOACCEL2) - return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); + if (abort->cmd_type == CMD_IOACCEL2) { + if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) + return hpsa_send_abort_ioaccel2(h, abort, + reply_queue); + else + return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, + abort, reply_queue); + } + return hpsa_send_abort(h, scsi3addr, abort, reply_queue); +} - return hpsa_send_abort(h, scsi3addr, abort, 0) && - hpsa_send_abort(h, scsi3addr, abort, 1); +/* Find out which reply queue a command was meant to return on */ +static int hpsa_extract_reply_queue(struct ctlr_info *h, + struct CommandList *c) +{ + if (c->cmd_type == CMD_IOACCEL2) + return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue; + return c->Header.ReplyQueue; +} + +/* + * Limit concurrency of abort commands to prevent + * over-subscription of commands + */ +static inline int wait_for_available_abort_cmd(struct ctlr_info *h) +{ +#define ABORT_CMD_WAIT_MSECS 5000 + return !wait_event_timeout(h->abort_cmd_wait_queue, + atomic_dec_if_positive(&h->abort_cmds_available) >= 0, + msecs_to_jiffies(ABORT_CMD_WAIT_MSECS)); } /* Send an abort for the specified command. @@ -4561,7 +5451,7 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h, static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) { - int i, rc; + int rc; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; struct CommandList *abort; /* pointer to command to be aborted */ @@ -4569,27 +5459,19 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) char msg[256]; /* For debug messaging. 
*/ int ml = 0; __le32 tagupper, taglower; - int refcount; + int refcount, reply_queue; - /* Find the controller of the command to be aborted */ - h = sdev_to_hba(sc->device); - if (WARN(h == NULL, - "ABORT REQUEST FAILED, Controller lookup failed.\n")) + if (sc == NULL) return FAILED; - if (lockup_detected(h)) + if (sc->device == NULL) return FAILED; - /* Check that controller supports some kind of task abort */ - if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && - !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + /* Find the controller of the command to be aborted */ + h = sdev_to_hba(sc->device); + if (h == NULL) return FAILED; - memset(msg, 0, sizeof(msg)); - ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ", - h->scsi_host->host_no, sc->device->channel, - sc->device->id, sc->device->lun); - /* Find the device of the command to be aborted */ dev = sc->device->hostdata; if (!dev) { @@ -4598,6 +5480,31 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) return FAILED; } + /* If controller locked up, we can guarantee command won't complete */ + if (lockup_detected(h)) { + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "ABORT FAILED, lockup detected"); + return FAILED; + } + + /* This is a good time to check if controller lockup has occurred */ + if (detect_controller_lockup(h)) { + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "ABORT FAILED, new lockup detected"); + return FAILED; + } + + /* Check that controller supports some kind of task abort */ + if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && + !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + return FAILED; + + memset(msg, 0, sizeof(msg)); + ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p", + h->scsi_host->host_no, sc->device->channel, + sc->device->id, sc->device->lun, + "Aborting command", sc); + /* Get SCSI command to be aborted */ abort = (struct CommandList *) sc->host_scribble; if (abort == NULL) { @@ -4609,50 +5516,115 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) cmd_free(h, abort); return SUCCESS; } + + /* Don't bother trying the abort if we know it won't work. */ + if (abort->cmd_type != CMD_IOACCEL2 && + abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) { + cmd_free(h, abort); + return FAILED; + } + + /* + * Check that we're aborting the right command. + * It's possible the CommandList already completed and got re-used. + */ + if (abort->scsi_cmd != sc) { + cmd_free(h, abort); + return SUCCESS; + } + + abort->abort_pending = true; hpsa_get_tag(h, abort, &taglower, &tagupper); + reply_queue = hpsa_extract_reply_queue(h, abort); ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); as = abort->scsi_cmd; if (as != NULL) - ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", - as->cmnd[0], as->serial_number); - dev_dbg(&h->pdev->dev, "%s\n", msg); - dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", - h->scsi_host->host_no, dev->bus, dev->target, dev->lun); + ml += sprintf(msg+ml, + "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ", + as->cmd_len, as->cmnd[0], as->cmnd[1], + as->serial_number); + dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg); + hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command"); + /* * Command is in flight, or possibly already completed * by the firmware (but not to the scsi mid layer) but we can't * distinguish which. Send the abort down. 
*/ - rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); + if (wait_for_available_abort_cmd(h)) { + dev_warn(&h->pdev->dev, + "%s FAILED, timeout waiting for an abort command to become available.\n", + msg); + cmd_free(h, abort); + return FAILED; + } + rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue); + atomic_inc(&h->abort_cmds_available); + wake_up_all(&h->abort_cmd_wait_queue); if (rc != 0) { - dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); - dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", - h->scsi_host->host_no, - dev->bus, dev->target, dev->lun); + dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg); + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "FAILED to abort command"); cmd_free(h, abort); return FAILED; } - dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); + dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg); + wait_event(h->event_sync_wait_queue, + abort->scsi_cmd != sc || lockup_detected(h)); + cmd_free(h, abort); + return !lockup_detected(h) ? SUCCESS : FAILED; +} - /* If the abort(s) above completed and actually aborted the - * command, then the command to be aborted should already be - * completed. If not, wait around a bit more to see if they - * manage to complete normally. - */ -#define ABORT_COMPLETE_WAIT_SECS 30 - for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { - refcount = atomic_read(&abort->refcount); - if (refcount < 2) { - cmd_free(h, abort); - return SUCCESS; - } else { - msleep(100); - } +/* + * For operations with an associated SCSI command, a command block is allocated + * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the + * block request tag as an index into a table of entries. cmd_tagged_free() is + * the complement, although cmd_free() may be called instead. + */ +static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, + struct scsi_cmnd *scmd) +{ + int idx = hpsa_get_cmd_index(scmd); + struct CommandList *c = h->cmd_pool + idx; + + if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { + dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", + idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); + /* The index value comes from the block layer, so if it's out of + * bounds, it's probably not our bug. + */ + BUG(); } - dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", - msg, ABORT_COMPLETE_WAIT_SECS); - cmd_free(h, abort); - return FAILED; + + atomic_inc(&c->refcount); + if (unlikely(!hpsa_is_cmd_idle(c))) { + /* + * We expect that the SCSI layer will hand us a unique tag + * value. Thus, there should never be a collision here between + * two requests...because if the selected command isn't idle + * then someone is going to be very disappointed. + */ + dev_err(&h->pdev->dev, + "tag collision (tag=%d) in cmd_tagged_alloc().\n", + idx); + if (c->scsi_cmd != NULL) + scsi_print_command(c->scsi_cmd); + scsi_print_command(scmd); + } + + hpsa_cmd_partial_init(h, idx, c); + return c; +} + +static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) +{ + /* + * Release our reference to the block. We don't need to do anything + * else to free it, because it is accessed by index. (There's no point + * in checking the result of the decrement, since we cannot guarantee + * that there isn't a concurrent abort which is also accessing it.) 
+ */ + (void)atomic_dec(&c->refcount); } /* @@ -4660,16 +5632,15 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track * which ones are free or in use. Lock must be held when calling this. * cmd_free() is the complement. + * This function never gives up and returns NULL. If it hangs, + * another thread must call cmd_free() to free some tags. */ static struct CommandList *cmd_alloc(struct ctlr_info *h) { struct CommandList *c; - int i; - union u64bit temp64; - dma_addr_t cmd_dma_handle, err_dma_handle; - int refcount; - unsigned long offset; + int refcount, i; + int offset = 0; /* * There is some *extremely* small but non-zero chance that that @@ -4681,12 +5652,20 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) * very unlucky thread might be starved anyway, never able to * beat the other threads. In reality, this happens so * infrequently as to be indistinguishable from never. + * + * Note that we start allocating commands before the SCSI host structure + * is initialized. Since the search starts at bit zero, this + * all works, since we have at least one command structure available; + * however, it means that the structures with the low indexes have to be + * reserved for driver-initiated requests, while requests from the block + * layer will use the higher indexes. */ - offset = h->last_allocation; /* benignly racy */ for (;;) { - i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset); - if (unlikely(i == h->nr_cmds)) { + i = find_next_zero_bit(h->cmd_pool_bits, + HPSA_NRESERVED_CMDS, + offset); + if (unlikely(i >= HPSA_NRESERVED_CMDS)) { offset = 0; continue; } @@ -4694,35 +5673,23 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) refcount = atomic_inc_return(&c->refcount); if (unlikely(refcount > 1)) { cmd_free(h, c); /* already in use */ - offset = (i + 1) % h->nr_cmds; + offset = (i + 1) % HPSA_NRESERVED_CMDS; continue; } set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)); break; /* it's ours now. */ } - h->last_allocation = i; /* benignly racy */ - - /* Zero out all of commandlist except the last field, refcount */ - memset(c, 0, offsetof(struct CommandList, refcount)); - c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT)); - cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c); - c->err_info = h->errinfo_pool + i; - memset(c->err_info, 0, sizeof(*c->err_info)); - err_dma_handle = h->errinfo_pool_dhandle - + i * sizeof(*c->err_info); - - c->cmdindex = i; - - c->busaddr = (u32) cmd_dma_handle; - temp64.val = (u64) err_dma_handle; - c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); - c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); - - c->h = h; + hpsa_cmd_partial_init(h, i, c); return c; } +/* + * This is the complementary operation to cmd_alloc(). Note, however, in some + * corner cases it may also be used to free blocks allocated by + * cmd_tagged_alloc() in which case the ref-count decrement does the trick and + * the clear-bit is harmless. 
+ */ static void cmd_free(struct ctlr_info *h, struct CommandList *c) { if (atomic_dec_and_test(&c->refcount)) { @@ -4900,7 +5867,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) if (iocommand.buf_size > 0) { buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) - return -EFAULT; + return -ENOMEM; if (iocommand.Request.Type.Direction & XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user(buff, iocommand.buf, @@ -4913,12 +5880,10 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) } } c = cmd_alloc(h); - if (c == NULL) { - rc = -ENOMEM; - goto out_kfree; - } + /* Fill in the command type */ c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; /* Fill in Command Header */ c->Header.ReplyQueue = 0; /* unused in simple mode */ if (iocommand.buf_size > 0) { /* buffer to fill */ @@ -4948,10 +5913,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[0].Len = cpu_to_le32(iocommand.buf_size); c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } - hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (iocommand.buf_size > 0) hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); + if (rc) { + rc = -EIO; + goto out; + } /* Copy the error information out */ memcpy(&iocommand.error_info, c->err_info, @@ -5048,11 +6017,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) sg_used++; } c = cmd_alloc(h); - if (c == NULL) { - status = -ENOMEM; - goto cleanup1; - } + c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; c->Header.ReplyQueue = 0; c->Header.SGList = (u8) sg_used; c->Header.SGTotal = cpu_to_le16(sg_used); @@ -5078,10 +6045,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) } c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); } - hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); + status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (sg_used) hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); + if (status) { + status = -EIO; + goto cleanup0; + } + /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); if (copy_to_user(argp, ioc, sizeof(*ioc))) { @@ -5163,14 +6135,13 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) } } -static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, +static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, u8 reset_type) { struct CommandList *c; c = cmd_alloc(h); - if (!c) - return -ENOMEM; + /* fill_cmd can't fail here, no data buffer to map */ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, RAID_CTLR_LUNID, TYPE_MSG); @@ -5181,7 +6152,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, * the command either. This is the last command we will send before * re-initializing everything, so it doesn't matter and won't leak. 
*/ - return 0; + return; } static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, @@ -5189,9 +6160,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, int cmd_type) { int pci_dir = XFER_NONE; - struct CommandList *a; /* for commands to be aborted */ + u64 tag; /* for commands to be aborted */ c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; c->Header.ReplyQueue = 0; if (buff != NULL && size > 0) { c->Header.SGList = 1; @@ -5305,10 +6277,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.CDB[7] = 0x00; break; case HPSA_ABORT_MSG: - a = buff; /* point to command to be aborted */ + memcpy(&tag, buff, sizeof(tag)); dev_dbg(&h->pdev->dev, - "Abort Tag:0x%016llx request Tag:0x%016llx", - a->Header.tag, c->Header.tag); + "Abort Tag:0x%016llx using rqst Tag:0x%016llx", + tag, c->Header.tag); c->Request.CDBLen = 16; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, @@ -5319,8 +6291,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.CDB[2] = 0x00; /* reserved */ c->Request.CDB[3] = 0x00; /* reserved */ /* Tag to abort goes in CDB[4]-CDB[11] */ - memcpy(&c->Request.CDB[4], &a->Header.tag, - sizeof(a->Header.tag)); + memcpy(&c->Request.CDB[4], &tag, sizeof(tag)); c->Request.CDB[12] = 0x00; /* reserved */ c->Request.CDB[13] = 0x00; /* reserved */ c->Request.CDB[14] = 0x00; /* reserved */ @@ -5399,7 +6370,7 @@ static inline void finish_cmd(struct CommandList *c) if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI || c->cmd_type == CMD_IOACCEL2)) complete_scsi_command(c); - else if (c->cmd_type == CMD_IOCTL_PEND) + else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) complete(c->waiting); } @@ -5733,7 +6704,7 @@ static int controller_reset_failed(struct CfgTable __iomem *cfgtable) /* This does a hard reset of the controller using PCI power management * states or the using the doorbell register. */ -static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) +static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) { u64 cfg_offset; u32 cfg_base_addr; @@ -5744,7 +6715,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) int rc; struct CfgTable __iomem *cfgtable; u32 use_doorbell; - u32 board_id; u16 command_register; /* For controllers as old as the P600, this is very nearly @@ -5760,11 +6730,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) * using the doorbell register. */ - rc = hpsa_lookup_board_id(pdev, &board_id); - if (rc < 0) { - dev_warn(&pdev->dev, "Board ID not found\n"); - return rc; - } if (!ctlr_is_resettable(board_id)) { dev_warn(&pdev->dev, "Controller not resettable\n"); return -ENODEV; @@ -5930,10 +6895,22 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) return -1; } +static void hpsa_disable_interrupt_mode(struct ctlr_info *h) +{ + if (h->msix_vector) { + if (h->pdev->msix_enabled) + pci_disable_msix(h->pdev); + h->msix_vector = 0; + } else if (h->msi_vector) { + if (h->pdev->msi_enabled) + pci_disable_msi(h->pdev); + h->msi_vector = 0; + } +} + /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use legacy INTx mode. 
*/ - static void hpsa_interrupt_mode(struct ctlr_info *h) { #ifdef CONFIG_PCI_MSI @@ -6064,6 +7041,21 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, return 0; } +static void hpsa_free_cfgtables(struct ctlr_info *h) +{ + if (h->transtable) { + iounmap(h->transtable); + h->transtable = NULL; + } + if (h->cfgtable) { + iounmap(h->cfgtable); + h->cfgtable = NULL; + } +} + +/* Find and map CISS config table and transfer table ++ * several items must be unmapped (freed) later ++ * */ static int hpsa_find_cfgtables(struct ctlr_info *h) { u64 cfg_offset; @@ -6090,25 +7082,31 @@ static int hpsa_find_cfgtables(struct ctlr_info *h) h->transtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index)+cfg_offset+trans_offset, sizeof(*h->transtable)); - if (!h->transtable) + if (!h->transtable) { + dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); + hpsa_free_cfgtables(h); return -ENOMEM; + } return 0; } static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) { - h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); +#define MIN_MAX_COMMANDS 16 + BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS); + + h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); /* Limit commands in memory limited kdump scenario. */ if (reset_devices && h->max_commands > 32) h->max_commands = 32; - if (h->max_commands < 16) { - dev_warn(&h->pdev->dev, "Controller reports " - "max supported commands of %d, an obvious lie. " - "Using 16. Ensure that firmware is up to date.\n", - h->max_commands); - h->max_commands = 16; + if (h->max_commands < MIN_MAX_COMMANDS) { + dev_warn(&h->pdev->dev, + "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n", + h->max_commands, + MIN_MAX_COMMANDS); + h->max_commands = MIN_MAX_COMMANDS; } } @@ -6153,6 +7151,8 @@ static void hpsa_find_board_params(struct ctlr_info *h) dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); + if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); } static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) @@ -6222,6 +7222,8 @@ static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) * as we enter this code.) 
*/ for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { + if (h->remove_in_progress) + goto done; spin_lock_irqsave(&h->lock, flags); doorbell_value = readl(h->vaddr + SA5_DOORBELL); spin_unlock_irqrestore(&h->lock, flags); @@ -6262,6 +7264,22 @@ error: return -ENODEV; } +/* free items allocated or mapped by hpsa_pci_init */ +static void hpsa_free_pci_init(struct ctlr_info *h) +{ + hpsa_free_cfgtables(h); /* pci_init 4 */ + iounmap(h->vaddr); /* pci_init 3 */ + h->vaddr = NULL; + hpsa_disable_interrupt_mode(h); /* pci_init 2 */ + /* + * call pci_disable_device before pci_release_regions per + * Documentation/PCI/pci.txt + */ + pci_disable_device(h->pdev); /* pci_init 1 */ + pci_release_regions(h->pdev); /* pci_init 2 */ +} + +/* several items must be freed later */ static int hpsa_pci_init(struct ctlr_info *h) { int prod_index, err; @@ -6272,19 +7290,24 @@ static int hpsa_pci_init(struct ctlr_info *h) h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); + h->needs_abort_tags_swizzled = + ctlr_needs_abort_tags_swizzled(h->board_id); + pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); err = pci_enable_device(h->pdev); if (err) { - dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); + dev_err(&h->pdev->dev, "failed to enable PCI device\n"); + pci_disable_device(h->pdev); return err; } err = pci_request_regions(h->pdev, HPSA); if (err) { dev_err(&h->pdev->dev, - "cannot obtain PCI resources, aborting\n"); + "failed to obtain PCI resources\n"); + pci_disable_device(h->pdev); return err; } @@ -6293,38 +7316,43 @@ static int hpsa_pci_init(struct ctlr_info *h) hpsa_interrupt_mode(h); err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) - goto err_out_free_res; + goto clean2; /* intmode+region, pci */ h->vaddr = remap_pci_mem(h->paddr, 0x250); if (!h->vaddr) { + dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); err = -ENOMEM; - goto err_out_free_res; + goto clean2; /* intmode+region, pci */ } err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) - goto err_out_free_res; + goto clean3; /* vaddr, intmode+region, pci */ err = hpsa_find_cfgtables(h); if (err) - goto err_out_free_res; + goto clean3; /* vaddr, intmode+region, pci */ hpsa_find_board_params(h); if (!hpsa_CISS_signature_present(h)) { err = -ENODEV; - goto err_out_free_res; + goto clean4; /* cfgtables, vaddr, intmode+region, pci */ } hpsa_set_driver_support_bits(h); hpsa_p600_dma_prefetch_quirk(h); err = hpsa_enter_simple_mode(h); if (err) - goto err_out_free_res; + goto clean4; /* cfgtables, vaddr, intmode+region, pci */ return 0; -err_out_free_res: - if (h->transtable) - iounmap(h->transtable); - if (h->cfgtable) - iounmap(h->cfgtable); - if (h->vaddr) - iounmap(h->vaddr); +clean4: /* cfgtables, vaddr, intmode+region, pci */ + hpsa_free_cfgtables(h); +clean3: /* vaddr, intmode+region, pci */ + iounmap(h->vaddr); + h->vaddr = NULL; +clean2: /* intmode+region, pci */ + hpsa_disable_interrupt_mode(h); + /* + * call pci_disable_device before pci_release_regions per + * Documentation/PCI/pci.txt + */ pci_disable_device(h->pdev); pci_release_regions(h->pdev); return err; @@ -6346,7 +7374,7 @@ static void hpsa_hba_inquiry(struct ctlr_info *h) } } -static int hpsa_init_reset_devices(struct pci_dev *pdev) +static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id) { int rc, i; void __iomem *vaddr; @@ -6382,7 +7410,7 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) iounmap(vaddr); /* Reset the controller 
with a PCI power-cycle or via doorbell */ - rc = hpsa_kdump_hard_reset_controller(pdev); + rc = hpsa_kdump_hard_reset_controller(pdev, board_id); /* -ENOTSUPP here means we cannot reset the controller * but it's already (and still) up and running in @@ -6408,7 +7436,29 @@ out_disable: return rc; } -static int hpsa_allocate_cmd_pool(struct ctlr_info *h) +static void hpsa_free_cmd_pool(struct ctlr_info *h) +{ + kfree(h->cmd_pool_bits); + h->cmd_pool_bits = NULL; + if (h->cmd_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct CommandList), + h->cmd_pool, + h->cmd_pool_dhandle); + h->cmd_pool = NULL; + h->cmd_pool_dhandle = 0; + } + if (h->errinfo_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct ErrorInfo), + h->errinfo_pool, + h->errinfo_pool_dhandle); + h->errinfo_pool = NULL; + h->errinfo_pool_dhandle = 0; + } +} + +static int hpsa_alloc_cmd_pool(struct ctlr_info *h) { h->cmd_pool_bits = kzalloc( DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * @@ -6425,34 +7475,13 @@ static int hpsa_allocate_cmd_pool(struct ctlr_info *h) dev_err(&h->pdev->dev, "out of memory in %s", __func__); goto clean_up; } + hpsa_preinitialize_commands(h); return 0; clean_up: hpsa_free_cmd_pool(h); return -ENOMEM; } -static void hpsa_free_cmd_pool(struct ctlr_info *h) -{ - kfree(h->cmd_pool_bits); - if (h->cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct CommandList), - h->cmd_pool, h->cmd_pool_dhandle); - if (h->ioaccel2_cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), - h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); - if (h->errinfo_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct ErrorInfo), - h->errinfo_pool, - h->errinfo_pool_dhandle); - if (h->ioaccel_cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct io_accel1_cmd), - h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); -} - static void hpsa_irq_affinity_hints(struct ctlr_info *h) { int i, cpu; @@ -6474,12 +7503,14 @@ static void hpsa_free_irqs(struct ctlr_info *h) i = h->intr_mode; irq_set_affinity_hint(h->intr[i], NULL); free_irq(h->intr[i], &h->q[i]); + h->q[i] = 0; return; } for (i = 0; i < h->msix_vector; i++) { irq_set_affinity_hint(h->intr[i], NULL); free_irq(h->intr[i], &h->q[i]); + h->q[i] = 0; } for (; i < MAX_REPLY_QUEUES; i++) h->q[i] = 0; @@ -6502,8 +7533,9 @@ static int hpsa_request_irqs(struct ctlr_info *h, if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { /* If performant mode and MSI-X, use multiple reply queues */ for (i = 0; i < h->msix_vector; i++) { + sprintf(h->intrname[i], "%s-msix%d", h->devname, i); rc = request_irq(h->intr[i], msixhandler, - 0, h->devname, + 0, h->intrname[i], &h->q[i]); if (rc) { int j; @@ -6524,18 +7556,30 @@ static int hpsa_request_irqs(struct ctlr_info *h, } else { /* Use single reply pool */ if (h->msix_vector > 0 || h->msi_vector) { + if (h->msix_vector) + sprintf(h->intrname[h->intr_mode], + "%s-msix", h->devname); + else + sprintf(h->intrname[h->intr_mode], + "%s-msi", h->devname); rc = request_irq(h->intr[h->intr_mode], - msixhandler, 0, h->devname, + msixhandler, 0, + h->intrname[h->intr_mode], &h->q[h->intr_mode]); } else { + sprintf(h->intrname[h->intr_mode], + "%s-intx", h->devname); rc = request_irq(h->intr[h->intr_mode], - intxhandler, IRQF_SHARED, h->devname, + intxhandler, IRQF_SHARED, + h->intrname[h->intr_mode], &h->q[h->intr_mode]); } + irq_set_affinity_hint(h->intr[h->intr_mode], NULL); } if (rc) { - dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", + 
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", h->intr[h->intr_mode], h->devname); + hpsa_free_irqs(h); return -ENODEV; } return 0; @@ -6543,42 +7587,27 @@ static int hpsa_request_irqs(struct ctlr_info *h, static int hpsa_kdump_soft_reset(struct ctlr_info *h) { - if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, - HPSA_RESET_TYPE_CONTROLLER)) { - dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); - return -EIO; - } + int rc; + hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); - if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { + rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); + if (rc) { dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); - return -1; + return rc; } dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); - if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { + rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); + if (rc) { dev_warn(&h->pdev->dev, "Board failed to become ready " "after soft reset.\n"); - return -1; + return rc; } return 0; } -static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) -{ - hpsa_free_irqs(h); -#ifdef CONFIG_PCI_MSI - if (h->msix_vector) { - if (h->pdev->msix_enabled) - pci_disable_msix(h->pdev); - } else if (h->msi_vector) { - if (h->pdev->msi_enabled) - pci_disable_msi(h->pdev); - } -#endif /* CONFIG_PCI_MSI */ -} - static void hpsa_free_reply_queues(struct ctlr_info *h) { int i; @@ -6586,30 +7615,36 @@ static void hpsa_free_reply_queues(struct ctlr_info *h) for (i = 0; i < h->nreply_queues; i++) { if (!h->reply_queue[i].head) continue; - pci_free_consistent(h->pdev, h->reply_queue_size, - h->reply_queue[i].head, h->reply_queue[i].busaddr); + pci_free_consistent(h->pdev, + h->reply_queue_size, + h->reply_queue[i].head, + h->reply_queue[i].busaddr); h->reply_queue[i].head = NULL; h->reply_queue[i].busaddr = 0; } + h->reply_queue_size = 0; } static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) { - hpsa_free_irqs_and_disable_msix(h); - hpsa_free_sg_chain_blocks(h); - hpsa_free_cmd_pool(h); - kfree(h->ioaccel1_blockFetchTable); - kfree(h->blockFetchTable); - hpsa_free_reply_queues(h); - if (h->vaddr) - iounmap(h->vaddr); - if (h->transtable) - iounmap(h->transtable); - if (h->cfgtable) - iounmap(h->cfgtable); - pci_disable_device(h->pdev); - pci_release_regions(h->pdev); - kfree(h); + hpsa_free_performant_mode(h); /* init_one 7 */ + hpsa_free_sg_chain_blocks(h); /* init_one 6 */ + hpsa_free_cmd_pool(h); /* init_one 5 */ + hpsa_free_irqs(h); /* init_one 4 */ + scsi_host_put(h->scsi_host); /* init_one 3 */ + h->scsi_host = NULL; /* init_one 3 */ + hpsa_free_pci_init(h); /* init_one 2_5 */ + free_percpu(h->lockup_detected); /* init_one 2 */ + h->lockup_detected = NULL; /* init_one 2 */ + if (h->resubmit_wq) { + destroy_workqueue(h->resubmit_wq); /* init_one 1 */ + h->resubmit_wq = NULL; + } + if (h->rescan_ctlr_wq) { + destroy_workqueue(h->rescan_ctlr_wq); + h->rescan_ctlr_wq = NULL; + } + kfree(h); /* init_one 1 */ } /* Called when controller lockup detected. 
*/ @@ -6617,17 +7652,22 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h) { int i, refcount; struct CommandList *c; + int failcount = 0; flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ for (i = 0; i < h->nr_cmds; i++) { c = h->cmd_pool + i; refcount = atomic_inc_return(&c->refcount); if (refcount > 1) { - c->err_info->CommandStatus = CMD_HARDWARE_ERR; + c->err_info->CommandStatus = CMD_CTLR_LOCKUP; finish_cmd(c); + atomic_dec(&h->commands_outstanding); + failcount++; } cmd_free(h, c); } + dev_warn(&h->pdev->dev, + "failed %d commands in fail_all\n", failcount); } static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) @@ -6653,18 +7693,19 @@ static void controller_lockup_detected(struct ctlr_info *h) if (!lockup_detected) { /* no heartbeat, but controller gave us a zero. */ dev_warn(&h->pdev->dev, - "lockup detected but scratchpad register is zero\n"); + "lockup detected after %d but scratchpad register is zero\n", + h->heartbeat_sample_interval / HZ); lockup_detected = 0xffffffff; } set_lockup_detected_for_all_cpus(h, lockup_detected); spin_unlock_irqrestore(&h->lock, flags); - dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", - lockup_detected); + dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", + lockup_detected, h->heartbeat_sample_interval / HZ); pci_disable_device(h->pdev); fail_all_outstanding_cmds(h); } -static void detect_controller_lockup(struct ctlr_info *h) +static int detect_controller_lockup(struct ctlr_info *h) { u64 now; u32 heartbeat; @@ -6674,7 +7715,7 @@ static void detect_controller_lockup(struct ctlr_info *h) /* If we've received an interrupt recently, we're ok. */ if (time_after64(h->last_intr_timestamp + (h->heartbeat_sample_interval), now)) - return; + return false; /* * If we've already checked the heartbeat recently, we're ok. @@ -6683,7 +7724,7 @@ static void detect_controller_lockup(struct ctlr_info *h) */ if (time_after64(h->last_heartbeat_timestamp + (h->heartbeat_sample_interval), now)) - return; + return false; /* If heartbeat has not changed since we last looked, we're not ok. */ spin_lock_irqsave(&h->lock, flags); @@ -6691,12 +7732,13 @@ static void detect_controller_lockup(struct ctlr_info *h) spin_unlock_irqrestore(&h->lock, flags); if (h->last_heartbeat == heartbeat) { controller_lockup_detected(h); - return; + return true; } /* We're ok. */ h->last_heartbeat = heartbeat; h->last_heartbeat_timestamp = now; + return false; } static void hpsa_ack_ctlr_events(struct ctlr_info *h) @@ -6843,11 +7885,18 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct ctlr_info *h; int try_soft_reset = 0; unsigned long flags; + u32 board_id; if (number_of_controllers == 0) printk(KERN_INFO DRIVER_NAME "\n"); - rc = hpsa_init_reset_devices(pdev); + rc = hpsa_lookup_board_id(pdev, &board_id); + if (rc < 0) { + dev_warn(&pdev->dev, "Board ID not found\n"); + return rc; + } + + rc = hpsa_init_reset_devices(pdev, board_id); if (rc) { if (rc != -ENOTSUPP) return rc; @@ -6868,42 +7917,41 @@ reinit_after_soft_reset: */ BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); h = kzalloc(sizeof(*h), GFP_KERNEL); - if (!h) + if (!h) { + dev_err(&pdev->dev, "Failed to allocate controller head\n"); return -ENOMEM; + } h->pdev = pdev; + h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; INIT_LIST_HEAD(&h->offline_device_list); spin_lock_init(&h->lock); spin_lock_init(&h->offline_device_lock); spin_lock_init(&h->scan_lock); atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); - - h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); - if (!h->rescan_ctlr_wq) { - rc = -ENOMEM; - goto clean1; - } - - h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); - if (!h->resubmit_wq) { - rc = -ENOMEM; - goto clean1; - } + atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS); /* Allocate and clear per-cpu variable lockup_detected */ h->lockup_detected = alloc_percpu(u32); if (!h->lockup_detected) { + dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); rc = -ENOMEM; - goto clean1; + goto clean1; /* aer/h */ } set_lockup_detected_for_all_cpus(h, 0); rc = hpsa_pci_init(h); - if (rc != 0) - goto clean1; + if (rc) + goto clean2; /* lu, aer/h */ + + /* relies on h-> settings made by hpsa_pci_init, including + * interrupt_mode h->intr */ + rc = hpsa_scsi_host_alloc(h); + if (rc) + goto clean2_5; /* pci, lu, aer/h */ - sprintf(h->devname, HPSA "%d", number_of_controllers); + sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); h->ctlr = number_of_controllers; number_of_controllers++; @@ -6917,34 +7965,57 @@ reinit_after_soft_reset: dac = 0; } else { dev_err(&pdev->dev, "no suitable DMA available\n"); - goto clean1; + goto clean3; /* shost, pci, lu, aer/h */ } } /* make sure the board interrupts are off */ h->access.set_intr_mask(h, HPSA_INTR_OFF); - if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) - goto clean2; - dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", - h->devname, pdev->device, - h->intr[h->intr_mode], dac ? "" : " not"); - rc = hpsa_allocate_cmd_pool(h); + rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); if (rc) - goto clean2_and_free_irqs; - if (hpsa_allocate_sg_chain_blocks(h)) - goto clean4; + goto clean3; /* shost, pci, lu, aer/h */ + rc = hpsa_alloc_cmd_pool(h); + if (rc) + goto clean4; /* irq, shost, pci, lu, aer/h */ + rc = hpsa_alloc_sg_chain_blocks(h); + if (rc) + goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ init_waitqueue_head(&h->scan_wait_queue); + init_waitqueue_head(&h->abort_cmd_wait_queue); + init_waitqueue_head(&h->event_sync_wait_queue); + mutex_init(&h->reset_mutex); h->scan_finished = 1; /* no scan currently in progress */ pci_set_drvdata(pdev, h); h->ndevices = 0; h->hba_mode_enabled = 0; - h->scsi_host = NULL; + spin_lock_init(&h->devlock); - hpsa_put_ctlr_into_performant_mode(h); + rc = hpsa_put_ctlr_into_performant_mode(h); + if (rc) + goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ + + /* hook into SCSI subsystem */ + rc = hpsa_scsi_add_host(h); + if (rc) + goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + + /* create the resubmit workqueue */ + h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); + if (!h->rescan_ctlr_wq) { + rc = -ENOMEM; + goto clean7; + } + + h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); + if (!h->resubmit_wq) { + rc = -ENOMEM; + goto clean7; /* aer/h */ + } - /* At this point, the controller is ready to take commands. + /* + * At this point, the controller is ready to take commands. * Now, if reset_devices and the hard reset didn't work, try * the soft reset and see if that works. 
*/ @@ -6966,13 +8037,24 @@ reinit_after_soft_reset: if (rc) { dev_warn(&h->pdev->dev, "Failed to request_irq after soft reset.\n"); - goto clean4; + /* + * cannot goto clean7 or free_irqs will be called + * again. Instead, do its work + */ + hpsa_free_performant_mode(h); /* clean7 */ + hpsa_free_sg_chain_blocks(h); /* clean6 */ + hpsa_free_cmd_pool(h); /* clean5 */ + /* + * skip hpsa_free_irqs(h) clean4 since that + * was just called before request_irqs failed + */ + goto clean3; } rc = hpsa_kdump_soft_reset(h); if (rc) /* Neither hard nor soft reset worked, we're hosed. */ - goto clean4; + goto clean9; dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, @@ -6993,21 +8075,20 @@ reinit_after_soft_reset: hpsa_undo_allocations_after_kdump_soft_reset(h); try_soft_reset = 0; if (rc) - /* don't go to clean4, we already unallocated */ + /* don't goto clean, we already unallocated */ return -ENODEV; goto reinit_after_soft_reset; } - /* Enable Accelerated IO path at driver layer */ - h->acciopath_status = 1; + /* Enable Accelerated IO path at driver layer */ + h->acciopath_status = 1; /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, HPSA_INTR_ON); hpsa_hba_inquiry(h); - hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ /* Monitor the controller for firmware lockups */ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; @@ -7019,19 +8100,36 @@ reinit_after_soft_reset: h->heartbeat_sample_interval); return 0; -clean4: +clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */ + kfree(h->hba_inquiry_data); +clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + hpsa_free_performant_mode(h); + h->access.set_intr_mask(h, HPSA_INTR_OFF); +clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ hpsa_free_sg_chain_blocks(h); +clean5: /* cmd, irq, shost, pci, lu, aer/h */ hpsa_free_cmd_pool(h); -clean2_and_free_irqs: +clean4: /* irq, shost, pci, lu, aer/h */ hpsa_free_irqs(h); -clean2: -clean1: - if (h->resubmit_wq) +clean3: /* shost, pci, lu, aer/h */ + scsi_host_put(h->scsi_host); + h->scsi_host = NULL; +clean2_5: /* pci, lu, aer/h */ + hpsa_free_pci_init(h); +clean2: /* lu, aer/h */ + if (h->lockup_detected) { + free_percpu(h->lockup_detected); + h->lockup_detected = NULL; + } +clean1: /* wq/aer/h */ + if (h->resubmit_wq) { destroy_workqueue(h->resubmit_wq); - if (h->rescan_ctlr_wq) + h->resubmit_wq = NULL; + } + if (h->rescan_ctlr_wq) { destroy_workqueue(h->rescan_ctlr_wq); - if (h->lockup_detected) - free_percpu(h->lockup_detected); + h->rescan_ctlr_wq = NULL; + } kfree(h); return rc; } @@ -7040,8 +8138,8 @@ static void hpsa_flush_cache(struct ctlr_info *h) { char *flush_buf; struct CommandList *c; + int rc; - /* Don't bother trying to flush the cache if locked up */ if (unlikely(lockup_detected(h))) return; flush_buf = kzalloc(4, GFP_KERNEL); @@ -7049,21 +8147,20 @@ static void hpsa_flush_cache(struct ctlr_info *h) return; c = cmd_alloc(h); - if (!c) { - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - goto out_of_memory; - } + if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, RAID_CTLR_LUNID, TYPE_CMD)) { goto out; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_TODEVICE, NO_TIMEOUT); + if (rc) + goto out; if (c->err_info->CommandStatus != 0) out: dev_warn(&h->pdev->dev, "error flushing cache on controller\n"); cmd_free(h, c); -out_of_memory: kfree(flush_buf); } @@ -7078,15 +8175,18 @@ static void hpsa_shutdown(struct pci_dev *pdev) */ 
hpsa_flush_cache(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); - hpsa_free_irqs_and_disable_msix(h); + hpsa_free_irqs(h); /* init_one 4 */ + hpsa_disable_interrupt_mode(h); /* pci_init 2 */ } static void hpsa_free_device_info(struct ctlr_info *h) { int i; - for (i = 0; i < h->ndevices; i++) + for (i = 0; i < h->ndevices; i++) { kfree(h->dev[i]); + h->dev[i] = NULL; + } } static void hpsa_remove_one(struct pci_dev *pdev) @@ -7108,29 +8208,34 @@ static void hpsa_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&h->rescan_ctlr_work); destroy_workqueue(h->rescan_ctlr_wq); destroy_workqueue(h->resubmit_wq); - hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ + + /* includes hpsa_free_irqs - init_one 4 */ + /* includes hpsa_disable_interrupt_mode - pci_init 2 */ hpsa_shutdown(pdev); - iounmap(h->vaddr); - iounmap(h->transtable); - iounmap(h->cfgtable); - hpsa_free_device_info(h); - hpsa_free_sg_chain_blocks(h); - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct CommandList), - h->cmd_pool, h->cmd_pool_dhandle); - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct ErrorInfo), - h->errinfo_pool, h->errinfo_pool_dhandle); - hpsa_free_reply_queues(h); - kfree(h->cmd_pool_bits); - kfree(h->blockFetchTable); - kfree(h->ioaccel1_blockFetchTable); - kfree(h->ioaccel2_blockFetchTable); - kfree(h->hba_inquiry_data); - pci_disable_device(pdev); - pci_release_regions(pdev); - free_percpu(h->lockup_detected); - kfree(h); + + hpsa_free_device_info(h); /* scan */ + + kfree(h->hba_inquiry_data); /* init_one 10 */ + h->hba_inquiry_data = NULL; /* init_one 10 */ + if (h->scsi_host) + scsi_remove_host(h->scsi_host); /* init_one 8 */ + hpsa_free_ioaccel2_sg_chain_blocks(h); + hpsa_free_performant_mode(h); /* init_one 7 */ + hpsa_free_sg_chain_blocks(h); /* init_one 6 */ + hpsa_free_cmd_pool(h); /* init_one 5 */ + + /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */ + + scsi_host_put(h->scsi_host); /* init_one 3 */ + h->scsi_host = NULL; /* init_one 3 */ + + /* includes hpsa_disable_interrupt_mode - pci_init 2 */ + hpsa_free_pci_init(h); /* init_one 2.5 */ + + free_percpu(h->lockup_detected); /* init_one 2 */ + h->lockup_detected = NULL; /* init_one 2 */ + /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ + kfree(h); /* init_one 1 */ } static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, @@ -7188,7 +8293,10 @@ static void calc_bucket_map(int bucket[], int num_buckets, } } -/* return -ENODEV or other reason on error, 0 on success */ +/* + * return -ENODEV on err, 0 on success (or no action) + * allocates numerous items that must be freed later + */ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) { int i; @@ -7370,7 +8478,23 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) return 0; } -static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) +/* Free ioaccel1 mode command blocks and block fetch table */ +static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) +{ + if (h->ioaccel_cmd_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + h->ioaccel_cmd_pool, + h->ioaccel_cmd_pool_dhandle); + h->ioaccel_cmd_pool = NULL; + h->ioaccel_cmd_pool_dhandle = 0; + } + kfree(h->ioaccel1_blockFetchTable); + h->ioaccel1_blockFetchTable = NULL; +} + +/* Allocate ioaccel1 mode command blocks and block fetch table */ +static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) { h->ioaccel_maxsg = 
readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); @@ -7401,16 +8525,32 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) return 0; clean_up: - if (h->ioaccel_cmd_pool) + hpsa_free_ioaccel1_cmd_and_bft(h); + return -ENOMEM; +} + +/* Free ioaccel2 mode command blocks and block fetch table */ +static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) +{ + hpsa_free_ioaccel2_sg_chain_blocks(h); + + if (h->ioaccel2_cmd_pool) { pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), - h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); - kfree(h->ioaccel1_blockFetchTable); - return 1; + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + h->ioaccel2_cmd_pool, + h->ioaccel2_cmd_pool_dhandle); + h->ioaccel2_cmd_pool = NULL; + h->ioaccel2_cmd_pool_dhandle = 0; + } + kfree(h->ioaccel2_blockFetchTable); + h->ioaccel2_blockFetchTable = NULL; } -static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) +/* Allocate ioaccel2 mode command blocks and block fetch table */ +static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) { + int rc; + /* Allocate ioaccel2 mode command blocks and block fetch table */ h->ioaccel_maxsg = @@ -7430,7 +8570,13 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) sizeof(u32)), GFP_KERNEL); if ((h->ioaccel2_cmd_pool == NULL) || - (h->ioaccel2_blockFetchTable == NULL)) + (h->ioaccel2_blockFetchTable == NULL)) { + rc = -ENOMEM; + goto clean_up; + } + + rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); + if (rc) goto clean_up; memset(h->ioaccel2_cmd_pool, 0, @@ -7438,41 +8584,50 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) return 0; clean_up: - if (h->ioaccel2_cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), - h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); - kfree(h->ioaccel2_blockFetchTable); - return 1; + hpsa_free_ioaccel2_cmd_and_bft(h); + return rc; +} + +/* Free items allocated by hpsa_put_ctlr_into_performant_mode */ +static void hpsa_free_performant_mode(struct ctlr_info *h) +{ + kfree(h->blockFetchTable); + h->blockFetchTable = NULL; + hpsa_free_reply_queues(h); + hpsa_free_ioaccel1_cmd_and_bft(h); + hpsa_free_ioaccel2_cmd_and_bft(h); } -static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) +/* return -ENODEV on error, 0 on success (or no action) + * allocates numerous items that must be freed later + */ +static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; unsigned long transMethod = CFGTBL_Trans_Performant | CFGTBL_Trans_use_short_tags; - int i; + int i, rc; if (hpsa_simple_mode) - return; + return 0; trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & PERFORMANT_MODE)) - return; + return 0; /* Check for I/O accelerator mode support */ if (trans_support & CFGTBL_Trans_io_accel1) { transMethod |= CFGTBL_Trans_io_accel1 | CFGTBL_Trans_enable_directed_msix; - if (hpsa_alloc_ioaccel_cmd_and_bft(h)) - goto clean_up; - } else { - if (trans_support & CFGTBL_Trans_io_accel2) { - transMethod |= CFGTBL_Trans_io_accel2 | + rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); + if (rc) + return rc; + } else if (trans_support & CFGTBL_Trans_io_accel2) { + transMethod |= CFGTBL_Trans_io_accel2 | CFGTBL_Trans_enable_directed_msix; - if (ioaccel2_alloc_cmds_and_bft(h)) - goto clean_up; - } + rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); + if (rc) + return rc; } h->nreply_queues = h->msix_vector > 0 ? 
h->msix_vector : 1; @@ -7484,8 +8639,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) h->reply_queue[i].head = pci_alloc_consistent(h->pdev, h->reply_queue_size, &(h->reply_queue[i].busaddr)); - if (!h->reply_queue[i].head) - goto clean_up; + if (!h->reply_queue[i].head) { + rc = -ENOMEM; + goto clean1; /* rq, ioaccel */ + } h->reply_queue[i].size = h->max_commands; h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ h->reply_queue[i].current_entry = 0; @@ -7494,15 +8651,24 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) /* Need a block fetch table for performant mode */ h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * sizeof(u32)), GFP_KERNEL); - if (!h->blockFetchTable) - goto clean_up; + if (!h->blockFetchTable) { + rc = -ENOMEM; + goto clean1; /* rq, ioaccel */ + } - hpsa_enter_performant_mode(h, trans_support); - return; + rc = hpsa_enter_performant_mode(h, trans_support); + if (rc) + goto clean2; /* bft, rq, ioaccel */ + return 0; -clean_up: - hpsa_free_reply_queues(h); +clean2: /* bft, rq, ioaccel */ kfree(h->blockFetchTable); + h->blockFetchTable = NULL; +clean1: /* rq, ioaccel */ + hpsa_free_reply_queues(h); + hpsa_free_ioaccel1_cmd_and_bft(h); + hpsa_free_ioaccel2_cmd_and_bft(h); + return rc; } static int is_accelerated_cmd(struct CommandList *c) -- cgit v1.2.3-54-g00ecf
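The allocation and teardown paths above (hpsa_free_pci_init(), hpsa_pci_init()'s clean2/clean3/clean4 labels, hpsa_init_one()'s clean1 through clean9 labels, and the "init_one N" comments in hpsa_remove_one()) all follow one pattern: acquire resources in a fixed order, unwind through numbered labels that free exactly what has been acquired so far, and set each pointer to NULL after freeing so the free helpers stay safe to call from any later path. Below is a minimal userspace sketch of that pattern; struct demo_ctlr and its init/fini helpers are illustrative stand-ins, not code from the driver.

/*
 * Minimal sketch of the numbered-label unwind pattern: each failure path
 * jumps to the label that releases exactly what has been set up so far,
 * in reverse order of acquisition.  The resources here (region, regs,
 * cfgtable) only stand in for pci_request_regions(), the ioremapped BAR,
 * and the mapped config tables.
 */
#include <stdlib.h>

struct demo_ctlr {
	void *region;    /* stands in for pci_request_regions() */
	void *regs;      /* stands in for the ioremapped BAR    */
	void *cfgtable;  /* stands in for the mapped cfgtables  */
};

static int demo_ctlr_init(struct demo_ctlr *h)
{
	h->region = malloc(64);		/* step 1 */
	if (!h->region)
		return -1;

	h->regs = malloc(64);		/* step 2 */
	if (!h->regs)
		goto clean1;		/* region */

	h->cfgtable = malloc(64);	/* step 3 */
	if (!h->cfgtable)
		goto clean2;		/* regs, region */

	return 0;

clean2:	/* regs, region */
	free(h->regs);
	h->regs = NULL;
clean1:	/* region */
	free(h->region);
	h->region = NULL;
	return -1;
}

static void demo_ctlr_fini(struct demo_ctlr *h)
{
	/* mirror image of init, newest first; NULLing keeps it idempotent */
	free(h->cfgtable);  h->cfgtable = NULL;
	free(h->regs);      h->regs = NULL;
	free(h->region);    h->region = NULL;
}

int main(void)
{
	struct demo_ctlr h = { 0 };

	if (demo_ctlr_init(&h) == 0)
		demo_ctlr_fini(&h);
	return 0;
}

Annotating each label with the set of resources still live at that point, as the patch does, makes it straightforward to check that every error path frees exactly what has been allocated and nothing more.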
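detect_controller_lockup() now returns a result so callers can tell whether the heartbeat check actually fired, and controller_lockup_detected() reports the sample interval in its warnings. The check itself is rate-limited: a recent interrupt or a recent heartbeat sample short-circuits it, and only a heartbeat counter that has not advanced across a full sample interval is treated as a lockup. The sketch below captures that sampling logic with plain integers; demo_lockup_detected() and its timestamp fields are placeholders for the driver's jiffies-based bookkeeping, not its real interfaces.

/*
 * Standalone sketch of heartbeat sampling: a firmware-maintained counter
 * is read at most once per sample interval; if it has not advanced since
 * the last sample and no interrupt was seen in the meantime, the
 * controller is presumed locked up.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_hb {
	uint64_t last_intr_ts;    /* time of last interrupt      */
	uint64_t last_sample_ts;  /* time of last heartbeat read */
	uint32_t last_heartbeat;  /* value read at that time     */
	uint64_t sample_interval; /* e.g. 30 seconds             */
};

static bool demo_lockup_detected(struct demo_hb *s, uint64_t now,
				 uint32_t heartbeat)
{
	/* a recent interrupt means the controller is clearly alive */
	if (now < s->last_intr_ts + s->sample_interval)
		return false;

	/* don't re-read the counter more often than the interval */
	if (now < s->last_sample_ts + s->sample_interval)
		return false;

	if (heartbeat == s->last_heartbeat)
		return true;		/* counter stalled: lockup */

	s->last_heartbeat = heartbeat;
	s->last_sample_ts = now;
	return false;
}

int main(void)
{
	struct demo_hb s = { .sample_interval = 30 };

	/* counter advancing at t=31: no lockup reported */
	demo_lockup_detected(&s, 31, 100);
	/* counter unchanged a full interval later: lockup */
	return demo_lockup_detected(&s, 62, 100) ? 0 : 1;
}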
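fail_all_outstanding_cmds() walks the whole command pool and uses each command's reference count to tell in-flight commands, which must be completed with CMD_CTLR_LOCKUP and counted, from idle slots. The sketch below shows that probe-the-refcount idea with C11 atomics; struct demo_cmd, demo_complete(), and the way the probe reference is dropped are illustrative stand-ins, while the real driver routes the final reference drop through cmd_free().

/*
 * Sketch of the refcount probe: bump each slot's count, and a result
 * greater than one means the slot was owned by a submitter and is still
 * outstanding, so it gets completed with the failure status.
 */
#include <stdatomic.h>
#include <stdio.h>

#define POOL_SIZE 8

struct demo_cmd {
	atomic_int refcount;	/* 1 while owned by a submitter, else 0 */
	int status;
};

static void demo_complete(struct demo_cmd *c, int status)
{
	c->status = status;	/* stands in for finish_cmd() */
}

static int demo_fail_all(struct demo_cmd *pool, int n, int fail_status)
{
	int i, failed = 0;

	for (i = 0; i < n; i++) {
		struct demo_cmd *c = &pool[i];

		if (atomic_fetch_add(&c->refcount, 1) + 1 > 1) {
			demo_complete(c, fail_status);	/* was in flight */
			failed++;
		}
		atomic_fetch_sub(&c->refcount, 1);	/* drop probe ref */
	}
	return failed;
}

int main(void)
{
	struct demo_cmd pool[POOL_SIZE] = { 0 };

	/* pretend slot 2 is in flight */
	atomic_store(&pool[2].refcount, 1);
	printf("failed %d commands\n", demo_fail_all(pool, POOL_SIZE, -5));
	return 0;
}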