Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r-- | drivers/dma/ioat/Makefile | 2
-rw-r--r-- | drivers/dma/ioat/dca.c | 374
-rw-r--r-- | drivers/dma/ioat/dma.c | 1655
-rw-r--r-- | drivers/dma/ioat/dma.h | 353
-rw-r--r-- | drivers/dma/ioat/dma_v2.c | 916
-rw-r--r-- | drivers/dma/ioat/dma_v2.h | 175
-rw-r--r-- | drivers/dma/ioat/dma_v3.c | 1717
-rw-r--r-- | drivers/dma/ioat/hw.h | 16
-rw-r--r-- | drivers/dma/ioat/init.c | 1314
-rw-r--r-- | drivers/dma/ioat/pci.c | 258
-rw-r--r-- | drivers/dma/ioat/prep.c | 715
-rw-r--r-- | drivers/dma/ioat/sysfs.c | 135
12 files changed, 3107 insertions, 4523 deletions
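
For reference, the APIC-ID-to-DCA-tag lookup that dca.c implements, and that this patch consolidates into a single ioat_dca_get_tag(), reduces to the small routine below. This is a standalone, illustrative sketch and not code from the patch: DCA_TAG_MAP_VALID is assumed to be bit 7, per the "Bit 7 of a tag map entry is the valid bit" comment in dca.c, and dca_tag_from_apicid()/apic_id are hypothetical names standing in for ioat_dca_get_tag() and cpu_physical_id(cpu).

/*
 * Standalone sketch (illustrative, not part of the patch) of the
 * APIC-ID-to-DCA-tag lookup performed by ioat_dca_get_tag() in dca.c.
 * DCA_TAG_MAP_VALID is assumed to be bit 7, matching the "valid" bit
 * described in the comment at the top of dca.c; apic_id stands in for
 * cpu_physical_id(cpu).
 */
#include <stdint.h>

#define DCA_TAG_MAP_VALID	0x80
#define IOAT_TAG_MAP_LEN	8

static uint8_t dca_tag_from_apicid(const uint8_t *tag_map, int apic_id)
{
	uint8_t tag = 0;
	int i;

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		uint8_t entry = tag_map[i];
		int value;

		if (entry & DCA_TAG_MAP_VALID) {
			/* valid entry: bits 0:6 pick a bit of the APIC ID */
			int bit = entry & ~DCA_TAG_MAP_VALID;

			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else {
			/* invalid entry: any non-zero literal contributes a 1 */
			value = entry ? 1 : 0;
		}
		tag |= (uint8_t)(value << i);
	}
	return tag;
}

The tag_map[] argument corresponds to the per-chipset maps (e.g. ioat_tag_map_BNB) that this patch removes along with the I/OAT v1/v2 DCA support; the surviving v3 path fills its map from the IOAT_APICID_TAG_MAP_OFFSET register in ioat_dca_init().
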
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 0ff7270af..cf5fedbe2 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o -ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o +ioatdma-y := init.o dma.o prep.o dca.o sysfs.o diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index ea1e107ae..2cb7c308d 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -31,7 +31,6 @@ #include "dma.h" #include "registers.h" -#include "dma_v2.h" /* * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 @@ -71,14 +70,6 @@ static inline int dca2_tag_map_valid(u8 *tag_map) #define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x)) #define IOAT_TAG_MAP_LEN 8 -static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; -static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), }; -static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = { - 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), }; -static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 }; - /* pack PCI B/D/F into a u16 */ static inline u16 dcaid_from_pcidev(struct pci_dev *pci) { @@ -126,96 +117,6 @@ struct ioat_dca_priv { struct ioat_dca_slot req_slots[0]; }; -/* 5000 series chipset DCA Port Requester ID Table Entry Format - * [15:8] PCI-Express Bus Number - * [7:3] PCI-Express Device Number - * [2:0] PCI-Express Function Number - * - * 5000 series chipset DCA control register format - * [7:1] Reserved (0) - * [0] Ignore Function Number - */ - -static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 id; - - /* This implementation only supports PCI-Express */ - if (!dev_is_pci(dev)) - return -ENODEV; - pdev = to_pci_dev(dev); - id = dcaid_from_pcidev(pdev); - - if (ioatdca->requester_count == ioatdca->max_requesters) - return -ENODEV; - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == NULL) { - /* found an empty slot */ - ioatdca->requester_count++; - ioatdca->req_slots[i].pdev = pdev; - ioatdca->req_slots[i].rid = id; - writew(id, ioatdca->dca_base + (i * 4)); - /* make sure the ignore function bit is off */ - writeb(0, ioatdca->dca_base + (i * 4) + 2); - return i; - } - } - /* Error, ioatdma->requester_count is out of whack */ - return -EFAULT; -} - -static int ioat_dca_remove_requester(struct dca_provider *dca, - struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - - /* This implementation only supports PCI-Express */ - if (!dev_is_pci(dev)) - return -ENODEV; - pdev = to_pci_dev(dev); - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == pdev) { - writew(0, ioatdca->dca_base + (i * 4)); - ioatdca->req_slots[i].pdev = NULL; - ioatdca->req_slots[i].rid = 0; - ioatdca->requester_count--; - return i; - } - } - return -ENODEV; -} - -static u8 ioat_dca_get_tag(struct dca_provider *dca, - struct device *dev, - int cpu) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - int i, apic_id, bit, value; - u8 entry, tag; - - tag = 0; - apic_id = cpu_physical_id(cpu); - - for (i = 0; i < IOAT_TAG_MAP_LEN; i++) { - entry = ioatdca->tag_map[i]; - if (entry & DCA_TAG_MAP_VALID) { - bit = entry & ~DCA_TAG_MAP_VALID; - value = (apic_id & (1 << bit)) ? 1 : 0; - } else { - value = entry ? 
1 : 0; - } - tag |= (value << i); - } - return tag; -} - static int ioat_dca_dev_managed(struct dca_provider *dca, struct device *dev) { @@ -231,260 +132,7 @@ static int ioat_dca_dev_managed(struct dca_provider *dca, return 0; } -static struct dca_ops ioat_dca_ops = { - .add_requester = ioat_dca_add_requester, - .remove_requester = ioat_dca_remove_requester, - .get_tag = ioat_dca_get_tag, - .dev_managed = ioat_dca_dev_managed, -}; - - -struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) -{ - struct dca_provider *dca; - struct ioat_dca_priv *ioatdca; - u8 *tag_map = NULL; - int i; - int err; - u8 version; - u8 max_requesters; - - if (!system_has_dca_enabled(pdev)) - return NULL; - - /* I/OAT v1 systems must have a known tag_map to support DCA */ - switch (pdev->vendor) { - case PCI_VENDOR_ID_INTEL: - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT: - tag_map = ioat_tag_map_BNB; - break; - case PCI_DEVICE_ID_INTEL_IOAT_CNB: - tag_map = ioat_tag_map_CNB; - break; - case PCI_DEVICE_ID_INTEL_IOAT_SCNB: - tag_map = ioat_tag_map_SCNB; - break; - } - break; - case PCI_VENDOR_ID_UNISYS: - switch (pdev->device) { - case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR: - tag_map = ioat_tag_map_UNISYS; - break; - } - break; - } - if (tag_map == NULL) - return NULL; - - version = readb(iobase + IOAT_VER_OFFSET); - if (version == IOAT_VER_3_0) - max_requesters = IOAT3_DCA_MAX_REQ; - else - max_requesters = IOAT_DCA_MAX_REQ; - - dca = alloc_dca_provider(&ioat_dca_ops, - sizeof(*ioatdca) + - (sizeof(struct ioat_dca_slot) * max_requesters)); - if (!dca) - return NULL; - - ioatdca = dca_priv(dca); - ioatdca->max_requesters = max_requesters; - ioatdca->dca_base = iobase + 0x54; - - /* copy over the APIC ID to DCA tag mapping */ - for (i = 0; i < IOAT_TAG_MAP_LEN; i++) - ioatdca->tag_map[i] = tag_map[i]; - - err = register_dca_provider(dca, &pdev->dev); - if (err) { - free_dca_provider(dca); - return NULL; - } - - return dca; -} - - -static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 id; - u16 global_req_table; - - /* This implementation only supports PCI-Express */ - if (!dev_is_pci(dev)) - return -ENODEV; - pdev = to_pci_dev(dev); - id = dcaid_from_pcidev(pdev); - - if (ioatdca->requester_count == ioatdca->max_requesters) - return -ENODEV; - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == NULL) { - /* found an empty slot */ - ioatdca->requester_count++; - ioatdca->req_slots[i].pdev = pdev; - ioatdca->req_slots[i].rid = id; - global_req_table = - readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); - writel(id | IOAT_DCA_GREQID_VALID, - ioatdca->iobase + global_req_table + (i * 4)); - return i; - } - } - /* Error, ioatdma->requester_count is out of whack */ - return -EFAULT; -} - -static int ioat2_dca_remove_requester(struct dca_provider *dca, - struct device *dev) -{ - struct ioat_dca_priv *ioatdca = dca_priv(dca); - struct pci_dev *pdev; - int i; - u16 global_req_table; - - /* This implementation only supports PCI-Express */ - if (!dev_is_pci(dev)) - return -ENODEV; - pdev = to_pci_dev(dev); - - for (i = 0; i < ioatdca->max_requesters; i++) { - if (ioatdca->req_slots[i].pdev == pdev) { - global_req_table = - readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); - writel(0, ioatdca->iobase + global_req_table + (i * 4)); - ioatdca->req_slots[i].pdev = NULL; - ioatdca->req_slots[i].rid = 0; - ioatdca->requester_count--; - return i; - } - 
} - return -ENODEV; -} - -static u8 ioat2_dca_get_tag(struct dca_provider *dca, - struct device *dev, - int cpu) -{ - u8 tag; - - tag = ioat_dca_get_tag(dca, dev, cpu); - tag = (~tag) & 0x1F; - return tag; -} - -static struct dca_ops ioat2_dca_ops = { - .add_requester = ioat2_dca_add_requester, - .remove_requester = ioat2_dca_remove_requester, - .get_tag = ioat2_dca_get_tag, - .dev_managed = ioat_dca_dev_managed, -}; - -static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) -{ - int slots = 0; - u32 req; - u16 global_req_table; - - global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET); - if (global_req_table == 0) - return 0; - do { - req = readl(iobase + global_req_table + (slots * sizeof(u32))); - slots++; - } while ((req & IOAT_DCA_GREQID_LASTID) == 0); - - return slots; -} - -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) -{ - struct dca_provider *dca; - struct ioat_dca_priv *ioatdca; - int slots; - int i; - int err; - u32 tag_map; - u16 dca_offset; - u16 csi_fsb_control; - u16 pcie_control; - u8 bit; - - if (!system_has_dca_enabled(pdev)) - return NULL; - - dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); - if (dca_offset == 0) - return NULL; - - slots = ioat2_dca_count_dca_slots(iobase, dca_offset); - if (slots == 0) - return NULL; - - dca = alloc_dca_provider(&ioat2_dca_ops, - sizeof(*ioatdca) - + (sizeof(struct ioat_dca_slot) * slots)); - if (!dca) - return NULL; - - ioatdca = dca_priv(dca); - ioatdca->iobase = iobase; - ioatdca->dca_base = iobase + dca_offset; - ioatdca->max_requesters = slots; - - /* some bios might not know to turn these on */ - csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); - if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) { - csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH; - writew(csi_fsb_control, - ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); - } - pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); - if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) { - pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR; - writew(pcie_control, - ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); - } - - - /* TODO version, compatibility and configuration checks */ - - /* copy out the APIC to DCA tag map */ - tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET); - for (i = 0; i < 5; i++) { - bit = (tag_map >> (4 * i)) & 0x0f; - if (bit < 8) - ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID; - else - ioatdca->tag_map[i] = 0; - } - - if (!dca2_tag_map_valid(ioatdca->tag_map)) { - WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND, - "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n", - dev_driver_string(&pdev->dev), - dev_name(&pdev->dev)); - free_dca_provider(dca); - return NULL; - } - - err = register_dca_provider(dca, &pdev->dev); - if (err) { - free_dca_provider(dca); - return NULL; - } - - return dca; -} - -static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) +static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; @@ -518,7 +166,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) return -EFAULT; } -static int ioat3_dca_remove_requester(struct dca_provider *dca, +static int ioat_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); @@ -545,7 +193,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca, 
return -ENODEV; } -static u8 ioat3_dca_get_tag(struct dca_provider *dca, +static u8 ioat_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { @@ -576,14 +224,14 @@ static u8 ioat3_dca_get_tag(struct dca_provider *dca, return tag; } -static struct dca_ops ioat3_dca_ops = { - .add_requester = ioat3_dca_add_requester, - .remove_requester = ioat3_dca_remove_requester, - .get_tag = ioat3_dca_get_tag, +static struct dca_ops ioat_dca_ops = { + .add_requester = ioat_dca_add_requester, + .remove_requester = ioat_dca_remove_requester, + .get_tag = ioat_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; -static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) +static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset) { int slots = 0; u32 req; @@ -618,7 +266,7 @@ static inline int dca3_tag_map_invalid(u8 *tag_map) (tag_map[4] == DCA_TAG_MAP_VALID)); } -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; @@ -645,11 +293,11 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) if (dca_offset == 0) return NULL; - slots = ioat3_dca_count_dca_slots(iobase, dca_offset); + slots = ioat_dca_count_dca_slots(iobase, dca_offset); if (slots == 0) return NULL; - dca = alloc_dca_provider(&ioat3_dca_ops, + dca = alloc_dca_provider(&ioat_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * slots)); if (!dca) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index ee0aa9f4c..f66b7e640 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -1,6 +1,6 @@ /* * Intel I/OAT DMA Linux driver - * Copyright(c) 2004 - 2009 Intel Corporation. + * Copyright(c) 2004 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -31,31 +31,23 @@ #include <linux/dma-mapping.h> #include <linux/workqueue.h> #include <linux/prefetch.h> -#include <linux/i7300_idle.h> #include "dma.h" #include "registers.h" #include "hw.h" #include "../dmaengine.h" -int ioat_pending_level = 4; -module_param(ioat_pending_level, int, 0644); -MODULE_PARM_DESC(ioat_pending_level, - "high-water mark for pushing ioat descriptors (default: 4)"); - -/* internal functions */ -static void ioat1_cleanup(struct ioat_dma_chan *ioat); -static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat); +static void ioat_eh(struct ioatdma_chan *ioat_chan); /** * ioat_dma_do_interrupt - handler used for single vector interrupt mode * @irq: interrupt id * @data: interrupt data */ -static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) +irqreturn_t ioat_dma_do_interrupt(int irq, void *data) { struct ioatdma_device *instance = data; - struct ioat_chan_common *chan; + struct ioatdma_chan *ioat_chan; unsigned long attnstatus; int bit; u8 intrctrl; @@ -72,9 +64,9 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { - chan = ioat_chan_by_index(instance, bit); - if (test_bit(IOAT_RUN, &chan->state)) - tasklet_schedule(&chan->cleanup_task); + ioat_chan = ioat_chan_by_index(instance, bit); + if (test_bit(IOAT_RUN, &ioat_chan->state)) + tasklet_schedule(&ioat_chan->cleanup_task); } writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); @@ -86,1161 +78,912 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) * @irq: interrupt id * @data: interrupt data */ -static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) +irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) { - struct ioat_chan_common *chan = data; + struct ioatdma_chan *ioat_chan = data; - if (test_bit(IOAT_RUN, &chan->state)) - tasklet_schedule(&chan->cleanup_task); + if (test_bit(IOAT_RUN, &ioat_chan->state)) + tasklet_schedule(&ioat_chan->cleanup_task); return IRQ_HANDLED; } -/* common channel initialization */ -void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx) +void ioat_stop(struct ioatdma_chan *ioat_chan) { - struct dma_device *dma = &device->common; - struct dma_chan *c = &chan->common; - unsigned long data = (unsigned long) c; - - chan->device = device; - chan->reg_base = device->reg_base + (0x80 * (idx + 1)); - spin_lock_init(&chan->cleanup_lock); - chan->common.device = dma; - dma_cookie_init(&chan->common); - list_add_tail(&chan->common.device_node, &dma->channels); - device->idx[idx] = chan; - init_timer(&chan->timer); - chan->timer.function = device->timer_fn; - chan->timer.data = data; - tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct pci_dev *pdev = ioat_dma->pdev; + int chan_id = chan_num(ioat_chan); + struct msix_entry *msix; + + /* 1/ stop irq from firing tasklets + * 2/ stop the tasklet from re-arming irqs + */ + clear_bit(IOAT_RUN, &ioat_chan->state); + + /* flush inflight interrupts */ + switch (ioat_dma->irq_mode) { + case IOAT_MSIX: + msix = &ioat_dma->msix_entries[chan_id]; + synchronize_irq(msix->vector); + break; + case IOAT_MSI: + case IOAT_INTX: + synchronize_irq(pdev->irq); + break; + default: + break; + } + + /* flush inflight timers */ + 
del_timer_sync(&ioat_chan->timer); + + /* flush inflight tasklet runs */ + tasklet_kill(&ioat_chan->cleanup_task); + + /* final cleanup now that everything is quiesced and can't re-arm */ + ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan); } -/** - * ioat1_dma_enumerate_channels - find and initialize the device's channels - * @device: the device to be enumerated - */ -static int ioat1_enumerate_channels(struct ioatdma_device *device) +static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan) { - u8 xfercap_scale; - u32 xfercap; - int i; - struct ioat_dma_chan *ioat; - struct device *dev = &device->pdev->dev; - struct dma_device *dma = &device->common; - - INIT_LIST_HEAD(&dma->channels); - dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); - dma->chancnt &= 0x1f; /* bits [4:0] valid */ - if (dma->chancnt > ARRAY_SIZE(device->idx)) { - dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", - dma->chancnt, ARRAY_SIZE(device->idx)); - dma->chancnt = ARRAY_SIZE(device->idx); - } - xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); - xfercap_scale &= 0x1f; /* bits [4:0] valid */ - xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); - dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap); - -#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL - if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) - dma->chancnt--; -#endif - for (i = 0; i < dma->chancnt; i++) { - ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); - if (!ioat) - break; + ioat_chan->dmacount += ioat_ring_pending(ioat_chan); + ioat_chan->issued = ioat_chan->head; + writew(ioat_chan->dmacount, + ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + dev_dbg(to_dev(ioat_chan), + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", + __func__, ioat_chan->head, ioat_chan->tail, + ioat_chan->issued, ioat_chan->dmacount); +} + +void ioat_issue_pending(struct dma_chan *c) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); - ioat_init_channel(device, &ioat->base, i); - ioat->xfercap = xfercap; - spin_lock_init(&ioat->desc_lock); - INIT_LIST_HEAD(&ioat->free_desc); - INIT_LIST_HEAD(&ioat->used_desc); + if (ioat_ring_pending(ioat_chan)) { + spin_lock_bh(&ioat_chan->prep_lock); + __ioat_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); } - dma->chancnt = i; - return i; } /** - * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended - * descriptors to hw - * @chan: DMA channel handle + * ioat_update_pending - log pending descriptors + * @ioat: ioat+ channel + * + * Check if the number of unsubmitted descriptors has exceeded the + * watermark. 
Called with prep_lock held */ -static inline void -__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) +static void ioat_update_pending(struct ioatdma_chan *ioat_chan) { - void __iomem *reg_base = ioat->base.reg_base; - - dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n", - __func__, ioat->pending); - ioat->pending = 0; - writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET); + if (ioat_ring_pending(ioat_chan) > ioat_pending_level) + __ioat_issue_pending(ioat_chan); } -static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) +static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan) { - struct ioat_dma_chan *ioat = to_ioat_chan(chan); + struct ioat_ring_ent *desc; + struct ioat_dma_descriptor *hw; - if (ioat->pending > 0) { - spin_lock_bh(&ioat->desc_lock); - __ioat1_dma_memcpy_issue_pending(ioat); - spin_unlock_bh(&ioat->desc_lock); + if (ioat_ring_space(ioat_chan) < 1) { + dev_err(to_dev(ioat_chan), + "Unable to start null desc - ring full\n"); + return; } + + dev_dbg(to_dev(ioat_chan), + "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); + + hw = desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = 1; + hw->ctl_f.compl_write = 1; + /* set size to non-zero value (channel returns error when size is 0) */ + hw->size = NULL_DESC_BUFFER_SIZE; + hw->src_addr = 0; + hw->dst_addr = 0; + async_tx_ack(&desc->txd); + ioat_set_chainaddr(ioat_chan, desc->txd.phys); + dump_desc_dbg(ioat_chan, desc); + /* make sure descriptors are written before we submit */ + wmb(); + ioat_chan->head += 1; + __ioat_issue_pending(ioat_chan); } -/** - * ioat1_reset_channel - restart a channel - * @ioat: IOAT DMA channel handle - */ -static void ioat1_reset_channel(struct ioat_dma_chan *ioat) +void ioat_start_null_desc(struct ioatdma_chan *ioat_chan) { - struct ioat_chan_common *chan = &ioat->base; - void __iomem *reg_base = chan->reg_base; - u32 chansts, chanerr; - - dev_warn(to_dev(chan), "reset\n"); - chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); - chansts = *chan->completion & IOAT_CHANSTS_STATUS; - if (chanerr) { - dev_err(to_dev(chan), - "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", - chan_num(chan), chansts, chanerr); - writel(chanerr, reg_base + IOAT_CHANERR_OFFSET); + spin_lock_bh(&ioat_chan->prep_lock); + __ioat_start_null_desc(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); +} + +static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan) +{ + /* set the tail to be re-issued */ + ioat_chan->issued = ioat_chan->tail; + ioat_chan->dmacount = 0; + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + + dev_dbg(to_dev(ioat_chan), + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", + __func__, ioat_chan->head, ioat_chan->tail, + ioat_chan->issued, ioat_chan->dmacount); + + if (ioat_ring_pending(ioat_chan)) { + struct ioat_ring_ent *desc; + + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); + ioat_set_chainaddr(ioat_chan, desc->txd.phys); + __ioat_issue_pending(ioat_chan); + } else + __ioat_start_null_desc(ioat_chan); +} + +static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo) +{ + unsigned long end = jiffies + tmo; + int err = 0; + u32 status; + + status = ioat_chansts(ioat_chan); + if (is_ioat_active(status) || is_ioat_idle(status)) + ioat_suspend(ioat_chan); + while (is_ioat_active(status) || is_ioat_idle(status)) { + if (tmo && time_after(jiffies, end)) { + err = -ETIMEDOUT; + break; + } + status 
= ioat_chansts(ioat_chan); + cpu_relax(); } - /* - * whack it upside the head with a reset - * and wait for things to settle out. - * force the pending count to a really big negative - * to make sure no one forces an issue_pending - * while we're waiting. - */ + return err; +} - ioat->pending = INT_MIN; - writeb(IOAT_CHANCMD_RESET, - reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); - set_bit(IOAT_RESET_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + RESET_DELAY); +static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo) +{ + unsigned long end = jiffies + tmo; + int err = 0; + + ioat_reset(ioat_chan); + while (ioat_reset_pending(ioat_chan)) { + if (end && time_after(jiffies, end)) { + err = -ETIMEDOUT; + break; + } + cpu_relax(); + } + + return err; } -static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) +static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx) + __releases(&ioat_chan->prep_lock) { struct dma_chan *c = tx->chan; - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); - struct ioat_chan_common *chan = &ioat->base; - struct ioat_desc_sw *first; - struct ioat_desc_sw *chain_tail; + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); dma_cookie_t cookie; - spin_lock_bh(&ioat->desc_lock); - /* cookie incr and addition to used_list must be atomic */ cookie = dma_cookie_assign(tx); - dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); + dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie); + + if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - /* write address into NextDescriptor field of last desc in chain */ - first = to_ioat_desc(desc->tx_list.next); - chain_tail = to_ioat_desc(ioat->used_desc.prev); - /* make descriptor updates globally visible before chaining */ + /* make descriptor updates visible before advancing ioat->head, + * this is purposefully not smp_wmb() since we are also + * publishing the descriptor updates to a dma device + */ wmb(); - chain_tail->hw->next = first->txd.phys; - list_splice_tail_init(&desc->tx_list, &ioat->used_desc); - dump_desc_dbg(ioat, chain_tail); - dump_desc_dbg(ioat, first); - if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + ioat_chan->head += ioat_chan->produce; - ioat->active += desc->hw->tx_cnt; - ioat->pending += desc->hw->tx_cnt; - if (ioat->pending >= ioat_pending_level) - __ioat1_dma_memcpy_issue_pending(ioat); - spin_unlock_bh(&ioat->desc_lock); + ioat_update_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); return cookie; } -/** - * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair - * @ioat: the channel supplying the memory pool for the descriptors - * @flags: allocation flags - */ -static struct ioat_desc_sw * -ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) +static struct ioat_ring_ent * +ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) { - struct ioat_dma_descriptor *desc; - struct ioat_desc_sw *desc_sw; - struct ioatdma_device *ioatdma_device; + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *desc; + struct ioatdma_device *ioat_dma; dma_addr_t phys; - ioatdma_device = ioat->base.device; - desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); - if (unlikely(!desc)) + ioat_dma = to_ioatdma_device(chan->device); + hw = pci_pool_alloc(ioat_dma->dma_pool, flags, 
&phys); + if (!hw) return NULL; + memset(hw, 0, sizeof(*hw)); - desc_sw = kzalloc(sizeof(*desc_sw), flags); - if (unlikely(!desc_sw)) { - pci_pool_free(ioatdma_device->dma_pool, desc, phys); + desc = kmem_cache_zalloc(ioat_cache, flags); + if (!desc) { + pci_pool_free(ioat_dma->dma_pool, hw, phys); return NULL; } - memset(desc, 0, sizeof(*desc)); + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = ioat_tx_submit_unlock; + desc->hw = hw; + desc->txd.phys = phys; + return desc; +} - INIT_LIST_HEAD(&desc_sw->tx_list); - dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); - desc_sw->txd.tx_submit = ioat1_tx_submit; - desc_sw->hw = desc; - desc_sw->txd.phys = phys; - set_desc_id(desc_sw, -1); +void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) +{ + struct ioatdma_device *ioat_dma; - return desc_sw; + ioat_dma = to_ioatdma_device(chan->device); + pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys); + kmem_cache_free(ioat_cache, desc); } -static int ioat_initial_desc_count = 256; -module_param(ioat_initial_desc_count, int, 0644); -MODULE_PARM_DESC(ioat_initial_desc_count, - "ioat1: initial descriptors per channel (default: 256)"); -/** - * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors - * @chan: the channel to be filled out - */ -static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) +struct ioat_ring_ent ** +ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) { - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioat_desc_sw *desc; - u32 chanerr; + struct ioat_ring_ent **ring; + int descs = 1 << order; int i; - LIST_HEAD(tmp_list); - - /* have we already been set up? */ - if (!list_empty(&ioat->free_desc)) - return ioat->desccount; - /* Setup register to interrupt and write completion status on error */ - writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); + if (order > ioat_get_max_alloc_order()) + return NULL; - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - if (chanerr) { - dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); + /* allocate the array to hold the software ring */ + ring = kcalloc(descs, sizeof(*ring), flags); + if (!ring) + return NULL; + for (i = 0; i < descs; i++) { + ring[i] = ioat_alloc_ring_ent(c, flags); + if (!ring[i]) { + while (i--) + ioat_free_ring_ent(ring[i], c); + kfree(ring); + return NULL; + } + set_desc_id(ring[i], i); } - /* Allocate descriptors */ - for (i = 0; i < ioat_initial_desc_count; i++) { - desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL); - if (!desc) { - dev_err(to_dev(chan), "Only %d initial descriptors\n", i); - break; - } - set_desc_id(desc, i); - list_add_tail(&desc->node, &tmp_list); + /* link descs */ + for (i = 0; i < descs-1; i++) { + struct ioat_ring_ent *next = ring[i+1]; + struct ioat_dma_descriptor *hw = ring[i]->hw; + + hw->next = next->txd.phys; } - spin_lock_bh(&ioat->desc_lock); - ioat->desccount = i; - list_splice(&tmp_list, &ioat->free_desc); - spin_unlock_bh(&ioat->desc_lock); - - /* allocate a completion writeback area */ - /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion = pci_pool_alloc(chan->device->completion_pool, - GFP_KERNEL, &chan->completion_dma); - memset(chan->completion, 0, sizeof(*chan->completion)); - writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) 
chan->completion_dma) >> 32, - chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - - set_bit(IOAT_RUN, &chan->state); - ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ - dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", - __func__, ioat->desccount); - return ioat->desccount; + ring[i]->hw->next = ring[0]->txd.phys; + + return ring; } -void ioat_stop(struct ioat_chan_common *chan) +static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order) { - struct ioatdma_device *device = chan->device; - struct pci_dev *pdev = device->pdev; - int chan_id = chan_num(chan); - struct msix_entry *msix; + /* reshape differs from normal ring allocation in that we want + * to allocate a new software ring while only + * extending/truncating the hardware ring + */ + struct dma_chan *c = &ioat_chan->dma_chan; + const u32 curr_size = ioat_ring_size(ioat_chan); + const u16 active = ioat_ring_active(ioat_chan); + const u32 new_size = 1 << order; + struct ioat_ring_ent **ring; + u32 i; + + if (order > ioat_get_max_alloc_order()) + return false; - /* 1/ stop irq from firing tasklets - * 2/ stop the tasklet from re-arming irqs + /* double check that we have at least 1 free descriptor */ + if (active == curr_size) + return false; + + /* when shrinking, verify that we can hold the current active + * set in the new ring */ - clear_bit(IOAT_RUN, &chan->state); + if (active >= new_size) + return false; - /* flush inflight interrupts */ - switch (device->irq_mode) { - case IOAT_MSIX: - msix = &device->msix_entries[chan_id]; - synchronize_irq(msix->vector); - break; - case IOAT_MSI: - case IOAT_INTX: - synchronize_irq(pdev->irq); - break; - default: - break; - } + /* allocate the array to hold the software ring */ + ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT); + if (!ring) + return false; - /* flush inflight timers */ - del_timer_sync(&chan->timer); + /* allocate/trim descriptors as needed */ + if (new_size > curr_size) { + /* copy current descriptors to the new ring */ + for (i = 0; i < curr_size; i++) { + u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1); + u16 new_idx = (ioat_chan->tail+i) & (new_size-1); - /* flush inflight tasklet runs */ - tasklet_kill(&chan->cleanup_task); + ring[new_idx] = ioat_chan->ring[curr_idx]; + set_desc_id(ring[new_idx], new_idx); + } - /* final cleanup now that everything is quiesced and can't re-arm */ - device->cleanup_fn((unsigned long) &chan->common); -} + /* add new descriptors to the ring */ + for (i = curr_size; i < new_size; i++) { + u16 new_idx = (ioat_chan->tail+i) & (new_size-1); -/** - * ioat1_dma_free_chan_resources - release all the descriptors - * @chan: the channel to be cleaned - */ -static void ioat1_dma_free_chan_resources(struct dma_chan *c) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *ioatdma_device = chan->device; - struct ioat_desc_sw *desc, *_desc; - int in_use_descs = 0; - - /* Before freeing channel resources first check - * if they have been previously allocated for this channel. 
- */ - if (ioat->desccount == 0) - return; + ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT); + if (!ring[new_idx]) { + while (i--) { + u16 new_idx = (ioat_chan->tail+i) & + (new_size-1); + + ioat_free_ring_ent(ring[new_idx], c); + } + kfree(ring); + return false; + } + set_desc_id(ring[new_idx], new_idx); + } - ioat_stop(chan); + /* hw link new descriptors */ + for (i = curr_size-1; i < new_size; i++) { + u16 new_idx = (ioat_chan->tail+i) & (new_size-1); + struct ioat_ring_ent *next = + ring[(new_idx+1) & (new_size-1)]; + struct ioat_dma_descriptor *hw = ring[new_idx]->hw; - /* Delay 100ms after reset to allow internal DMA logic to quiesce - * before removing DMA descriptor resources. - */ - writeb(IOAT_CHANCMD_RESET, - chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); - mdelay(100); - - spin_lock_bh(&ioat->desc_lock); - list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { - dev_dbg(to_dev(chan), "%s: freeing %d from used list\n", - __func__, desc_id(desc)); - dump_desc_dbg(ioat, desc); - in_use_descs++; - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); - } - list_for_each_entry_safe(desc, _desc, - &ioat->free_desc, node) { - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); + hw->next = next->txd.phys; + } + } else { + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *next; + + /* copy current descriptors to the new ring, dropping the + * removed descriptors + */ + for (i = 0; i < new_size; i++) { + u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1); + u16 new_idx = (ioat_chan->tail+i) & (new_size-1); + + ring[new_idx] = ioat_chan->ring[curr_idx]; + set_desc_id(ring[new_idx], new_idx); + } + + /* free deleted descriptors */ + for (i = new_size; i < curr_size; i++) { + struct ioat_ring_ent *ent; + + ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i); + ioat_free_ring_ent(ent, c); + } + + /* fix up hardware ring */ + hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw; + next = ring[(ioat_chan->tail+new_size) & (new_size-1)]; + hw->next = next->txd.phys; } - spin_unlock_bh(&ioat->desc_lock); - pci_pool_free(ioatdma_device->completion_pool, - chan->completion, - chan->completion_dma); + dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n", + __func__, new_size); - /* one is ok since we left it on there on purpose */ - if (in_use_descs > 1) - dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", - in_use_descs - 1); + kfree(ioat_chan->ring); + ioat_chan->ring = ring; + ioat_chan->alloc_order = order; - chan->last_completion = 0; - chan->completion_dma = 0; - ioat->pending = 0; - ioat->desccount = 0; + return true; } /** - * ioat1_dma_get_next_descriptor - return the next available descriptor - * @ioat: IOAT DMA channel handle - * - * Gets the next descriptor from the chain, and must be called with the - * channel's desc_lock held. Allocates more descriptors if the channel - * has run out. 
+ * ioat_check_space_lock - verify space and grab ring producer lock + * @ioat: ioat,3 channel (ring) to operate on + * @num_descs: allocation length */ -static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) +int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs) + __acquires(&ioat_chan->prep_lock) { - struct ioat_desc_sw *new; + bool retry; - if (!list_empty(&ioat->free_desc)) { - new = to_ioat_desc(ioat->free_desc.next); - list_del(&new->node); - } else { - /* try to get another desc */ - new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); - if (!new) { - dev_err(to_dev(&ioat->base), "alloc failed\n"); - return NULL; - } + retry: + spin_lock_bh(&ioat_chan->prep_lock); + /* never allow the last descriptor to be consumed, we need at + * least one free at all times to allow for on-the-fly ring + * resizing. + */ + if (likely(ioat_ring_space(ioat_chan) > num_descs)) { + dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat_chan->head, + ioat_chan->tail, ioat_chan->issued); + ioat_chan->produce = num_descs; + return 0; /* with ioat->prep_lock held */ + } + retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + + /* is another cpu already trying to expand the ring? */ + if (retry) + goto retry; + + spin_lock_bh(&ioat_chan->cleanup_lock); + spin_lock_bh(&ioat_chan->prep_lock); + retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1); + clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + + /* if we were able to expand the ring retry the allocation */ + if (retry) + goto retry; + + dev_dbg_ratelimited(to_dev(ioat_chan), + "%s: ring full! 
num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat_chan->head, + ioat_chan->tail, ioat_chan->issued); + + /* progress reclaim in the allocation failure case we may be + * called under bh_disabled so we need to trigger the timer + * event directly + */ + if (time_is_before_jiffies(ioat_chan->timer.expires) + && timer_pending(&ioat_chan->timer)) { + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + ioat_timer_event((unsigned long)ioat_chan); } - dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n", - __func__, desc_id(new)); - prefetch(new->hw); - return new; + + return -ENOMEM; } -static struct dma_async_tx_descriptor * -ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) +static bool desc_has_ext(struct ioat_ring_ent *desc) { - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_desc_sw *desc; - size_t copy; - LIST_HEAD(chain); - dma_addr_t src = dma_src; - dma_addr_t dest = dma_dest; - size_t total_len = len; - struct ioat_dma_descriptor *hw = NULL; - int tx_cnt = 0; - - spin_lock_bh(&ioat->desc_lock); - desc = ioat1_dma_get_next_descriptor(ioat); - do { - if (!desc) - break; - - tx_cnt++; - copy = min_t(size_t, len, ioat->xfercap); + struct ioat_dma_descriptor *hw = desc->hw; - hw = desc->hw; - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dest; + if (hw->ctl_f.op == IOAT_OP_XOR || + hw->ctl_f.op == IOAT_OP_XOR_VAL) { + struct ioat_xor_descriptor *xor = desc->xor; - list_add_tail(&desc->node, &chain); + if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) + return true; + } else if (hw->ctl_f.op == IOAT_OP_PQ || + hw->ctl_f.op == IOAT_OP_PQ_VAL) { + struct ioat_pq_descriptor *pq = desc->pq; - len -= copy; - dest += copy; - src += copy; - if (len) { - struct ioat_desc_sw *next; - - async_tx_ack(&desc->txd); - next = ioat1_dma_get_next_descriptor(ioat); - hw->next = next ? 
next->txd.phys : 0; - dump_desc_dbg(ioat, desc); - desc = next; - } else - hw->next = 0; - } while (len); - - if (!desc) { - struct ioat_chan_common *chan = &ioat->base; - - dev_err(to_dev(chan), - "chan%d - get_next_desc failed\n", chan_num(chan)); - list_splice(&chain, &ioat->free_desc); - spin_unlock_bh(&ioat->desc_lock); - return NULL; + if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) + return true; } - spin_unlock_bh(&ioat->desc_lock); - desc->txd.flags = flags; - desc->len = total_len; - list_splice(&chain, &desc->tx_list); - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.compl_write = 1; - hw->tx_cnt = tx_cnt; - dump_desc_dbg(ioat, desc); - - return &desc->txd; + return false; } -static void ioat1_cleanup_event(unsigned long data) +static void +ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) { - struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - - ioat1_cleanup(ioat); - if (!test_bit(IOAT_RUN, &chan->state)) + if (!sed) return; - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + + dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); + kmem_cache_free(ioat_sed_cache, sed); } -dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) +static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan) { - dma_addr_t phys_complete; + u64 phys_complete; u64 completion; - completion = *chan->completion; + completion = *ioat_chan->completion; phys_complete = ioat_chansts_to_addr(completion); - dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, + dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__, (unsigned long long) phys_complete); - if (is_ioat_halted(completion)) { - u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", - chanerr); - - /* TODO do something to salvage the situation */ - } - return phys_complete; } -bool ioat_cleanup_preamble(struct ioat_chan_common *chan, - dma_addr_t *phys_complete) +static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, + u64 *phys_complete) { - *phys_complete = ioat_get_current_completion(chan); - if (*phys_complete == chan->last_completion) + *phys_complete = ioat_get_current_completion(ioat_chan); + if (*phys_complete == ioat_chan->last_completion) return false; - clear_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + + clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); return true; } -static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) +static void +desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) { - struct ioat_chan_common *chan = &ioat->base; - struct list_head *_desc, *n; - struct dma_async_tx_descriptor *tx; + struct ioat_dma_descriptor *hw = desc->hw; - dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n", - __func__, (unsigned long long) phys_complete); - list_for_each_safe(_desc, n, &ioat->used_desc) { - struct ioat_desc_sw *desc; + switch (hw->ctl_f.op) { + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + { + struct ioat_pq_descriptor *pq = desc->pq; - prefetch(n); - desc = list_entry(_desc, typeof(*desc), node); - tx = &desc->txd; - /* - * Incoming DMA requests may use multiple descriptors, - * due to exceeding xfercap, perhaps. If so, only the - * last one will have a cookie, and require unmapping. 
- */ - dump_desc_dbg(ioat, desc); - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - ioat->active -= desc->hw->tx_cnt; - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } + /* check if there's error written */ + if (!pq->dwbes_f.wbes) + return; - if (tx->phys != phys_complete) { - /* - * a completed entry, but not the last, so clean - * up if the client is done with the descriptor - */ - if (async_tx_test_ack(tx)) - list_move_tail(&desc->node, &ioat->free_desc); - } else { - /* - * last used desc. Do not remove, so we can - * append from it. - */ - - /* if nothing else is pending, cancel the - * completion timeout - */ - if (n == &ioat->used_desc) { - dev_dbg(to_dev(chan), - "%s cancel completion timeout\n", - __func__); - clear_bit(IOAT_COMPLETION_PENDING, &chan->state); - } + /* need to set a chanerr var for checking to clear later */ - /* TODO check status bits? */ - break; - } - } + if (pq->dwbes_f.p_val_err) + *desc->result |= SUM_CHECK_P_RESULT; + + if (pq->dwbes_f.q_val_err) + *desc->result |= SUM_CHECK_Q_RESULT; - chan->last_completion = phys_complete; + return; + } + default: + return; + } } /** - * ioat1_cleanup - cleanup up finished descriptors - * @chan: ioat channel to be cleaned up - * - * To prevent lock contention we defer cleanup when the locks are - * contended with a terminal timeout that forces cleanup and catches - * completion notification errors. + * __cleanup - reclaim used descriptors + * @ioat: channel (ring) to clean */ -static void ioat1_cleanup(struct ioat_dma_chan *ioat) +static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) { - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - - prefetch(chan->completion); - - if (!spin_trylock_bh(&chan->cleanup_lock)) - return; + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; + bool seen_current = false; + int idx = ioat_chan->tail, i; + u16 active; - if (!ioat_cleanup_preamble(chan, &phys_complete)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } + dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); - if (!spin_trylock_bh(&ioat->desc_lock)) { - spin_unlock_bh(&chan->cleanup_lock); + /* + * At restart of the channel, the completion address and the + * channel status will be 0 due to starting a new chain. Since + * it's new chain and the first descriptor "fails", there is + * nothing to clean up. We do not want to reap the entire submitted + * chain due to this 0 address value and then BUG. 
+ */ + if (!phys_complete) return; - } - __cleanup(ioat, phys_complete); + active = ioat_ring_active(ioat_chan); + for (i = 0; i < active && !seen_current; i++) { + struct dma_async_tx_descriptor *tx; - spin_unlock_bh(&ioat->desc_lock); - spin_unlock_bh(&chan->cleanup_lock); -} - -static void ioat1_timer_event(unsigned long data) -{ - struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; + smp_read_barrier_depends(); + prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1)); + desc = ioat_get_ring_ent(ioat_chan, idx + i); + dump_desc_dbg(ioat_chan, desc); - dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); + /* set err stat if we are using dwbes */ + if (ioat_dma->cap & IOAT_CAP_DWBES) + desc_get_errstat(ioat_chan, desc); - spin_lock_bh(&chan->cleanup_lock); - if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { - struct ioat_desc_sw *desc; - - spin_lock_bh(&ioat->desc_lock); + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } - /* restart active descriptors */ - desc = to_ioat_desc(ioat->used_desc.prev); - ioat_set_chainaddr(ioat, desc->txd.phys); - ioat_start(chan); + if (tx->phys == phys_complete) + seen_current = true; - ioat->pending = 0; - set_bit(IOAT_COMPLETION_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - spin_unlock_bh(&ioat->desc_lock); - } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { - dma_addr_t phys_complete; + /* skip extended descriptors */ + if (desc_has_ext(desc)) { + BUG_ON(i + 1 >= active); + i++; + } - spin_lock_bh(&ioat->desc_lock); - /* if we haven't made progress and we have already - * acknowledged a pending completion once, then be more - * forceful with a restart - */ - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) - ioat1_reset_channel(ioat); - else { - u64 status = ioat_chansts(chan); - - /* manually update the last completion address */ - if (ioat_chansts_to_addr(status) != 0) - *chan->completion = status; - - set_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + /* cleanup super extended descriptors */ + if (desc->sed) { + ioat_free_sed(ioat_dma, desc->sed); + desc->sed = NULL; } - spin_unlock_bh(&ioat->desc_lock); } - spin_unlock_bh(&chan->cleanup_lock); -} - -enum dma_status -ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct ioat_chan_common *chan = to_chan_common(c); - struct ioatdma_device *device = chan->device; - enum dma_status ret; - ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; + /* finish all descriptor reads before incrementing tail */ + smp_mb(); + ioat_chan->tail = idx + i; + /* no active descs have written a completion? 
*/ + BUG_ON(active && !seen_current); + ioat_chan->last_completion = phys_complete; - device->cleanup_fn((unsigned long) c); + if (active - i == 0) { + dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", + __func__); + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + } - return dma_cookie_status(c, cookie, txstate); + /* 5 microsecond delay per pending descriptor */ + writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), + ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); } -static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) +static void ioat_cleanup(struct ioatdma_chan *ioat_chan) { - struct ioat_chan_common *chan = &ioat->base; - struct ioat_desc_sw *desc; - struct ioat_dma_descriptor *hw; + u64 phys_complete; - spin_lock_bh(&ioat->desc_lock); + spin_lock_bh(&ioat_chan->cleanup_lock); - desc = ioat1_dma_get_next_descriptor(ioat); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); - if (!desc) { - dev_err(to_dev(chan), - "Unable to start null desc - get next desc failed\n"); - spin_unlock_bh(&ioat->desc_lock); - return; - } + if (is_ioat_halted(*ioat_chan->completion)) { + u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - hw = desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = 1; - hw->ctl_f.compl_write = 1; - /* set size to non-zero value (channel returns error when size is 0) */ - hw->size = NULL_DESC_BUFFER_SIZE; - hw->src_addr = 0; - hw->dst_addr = 0; - async_tx_ack(&desc->txd); - hw->next = 0; - list_add_tail(&desc->node, &ioat->used_desc); - dump_desc_dbg(ioat, desc); + if (chanerr & IOAT_CHANERR_HANDLE_MASK) { + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + ioat_eh(ioat_chan); + } + } - ioat_set_chainaddr(ioat, desc->txd.phys); - ioat_start(chan); - spin_unlock_bh(&ioat->desc_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); } -/* - * Perform a IOAT transaction to verify the HW works. - */ -#define IOAT_TEST_SIZE 2000 +void ioat_cleanup_event(unsigned long data) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); + + ioat_cleanup(ioat_chan); + if (!test_bit(IOAT_RUN, &ioat_chan->state)) + return; + writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); +} -static void ioat_dma_test_callback(void *dma_async_param) +static void ioat_restart_channel(struct ioatdma_chan *ioat_chan) { - struct completion *cmp = dma_async_param; + u64 phys_complete; + + ioat_quiesce(ioat_chan, 0); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); - complete(cmp); + __ioat_restart_chan(ioat_chan); } -/** - * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 
- * @device: device to be tested - */ -int ioat_dma_self_test(struct ioatdma_device *device) +static void ioat_eh(struct ioatdma_chan *ioat_chan) { - int i; - u8 *src; - u8 *dest; - struct dma_device *dma = &device->common; - struct device *dev = &device->pdev->dev; - struct dma_chan *dma_chan; + struct pci_dev *pdev = to_pdev(ioat_chan); + struct ioat_dma_descriptor *hw; struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - int err = 0; - struct completion cmp; - unsigned long tmo; - unsigned long flags; - - src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); - if (!src) - return -ENOMEM; - dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); - if (!dest) { - kfree(src); - return -ENOMEM; - } + u64 phys_complete; + struct ioat_ring_ent *desc; + u32 err_handled = 0; + u32 chanerr_int; + u32 chanerr; - /* Fill in src buffer */ - for (i = 0; i < IOAT_TEST_SIZE; i++) - src[i] = (u8)i; - - /* Start copy, using first DMA channel */ - dma_chan = container_of(dma->channels.next, struct dma_chan, - device_node); - if (dma->device_alloc_chan_resources(dma_chan) < 1) { - dev_err(dev, "selftest cannot allocate chan resource\n"); - err = -ENODEV; - goto out; - } + /* cleanup so tail points to descriptor that caused the error */ + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); - dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_src)) { - dev_err(dev, "mapping src buffer failed\n"); - goto free_resources; - } - dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, dma_dest)) { - dev_err(dev, "mapping dest buffer failed\n"); - goto unmap_src; - } - flags = DMA_PREP_INTERRUPT; - tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, - IOAT_TEST_SIZE, flags); - if (!tx) { - dev_err(dev, "Self-test prep failed, disabling\n"); - err = -ENODEV; - goto unmap_dma; - } + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test setup failed, disabling\n"); - err = -ENODEV; - goto unmap_dma; - } - dma->device_issue_pending(dma_chan); + dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n", + __func__, chanerr, chanerr_int); - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); + hw = desc->hw; + dump_desc_dbg(ioat_chan, desc); - if (tmo == 0 || - dma->device_tx_status(dma_chan, cookie, NULL) - != DMA_COMPLETE) { - dev_err(dev, "Self-test copy timed out, disabling\n"); - err = -ENODEV; - goto unmap_dma; - } - if (memcmp(src, dest, IOAT_TEST_SIZE)) { - dev_err(dev, "Self-test copy failed compare, disabling\n"); - err = -ENODEV; - goto free_resources; + switch (hw->ctl_f.op) { + case IOAT_OP_XOR_VAL: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + break; + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { + *desc->result |= SUM_CHECK_Q_RESULT; + err_handled |= IOAT_CHANERR_XOR_Q_ERR; + } + break; } -unmap_dma: - 
dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); -unmap_src: - dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); -free_resources: - dma->device_free_chan_resources(dma_chan); -out: - kfree(src); - kfree(dest); - return err; -} - -static char ioat_interrupt_style[32] = "msix"; -module_param_string(ioat_interrupt_style, ioat_interrupt_style, - sizeof(ioat_interrupt_style), 0644); -MODULE_PARM_DESC(ioat_interrupt_style, - "set ioat interrupt style: msix (default), msi, intx"); - -/** - * ioat_dma_setup_interrupts - setup interrupt handler - * @device: ioat device - */ -int ioat_dma_setup_interrupts(struct ioatdma_device *device) -{ - struct ioat_chan_common *chan; - struct pci_dev *pdev = device->pdev; - struct device *dev = &pdev->dev; - struct msix_entry *msix; - int i, j, msixcnt; - int err = -EINVAL; - u8 intrctrl = 0; - - if (!strcmp(ioat_interrupt_style, "msix")) - goto msix; - if (!strcmp(ioat_interrupt_style, "msi")) - goto msi; - if (!strcmp(ioat_interrupt_style, "intx")) - goto intx; - dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); - goto err_no_irq; - -msix: - /* The number of MSI-X vectors should equal the number of channels */ - msixcnt = device->common.chancnt; - for (i = 0; i < msixcnt; i++) - device->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt); - if (err) - goto msi; - - for (i = 0; i < msixcnt; i++) { - msix = &device->msix_entries[i]; - chan = ioat_chan_by_index(device, i); - err = devm_request_irq(dev, msix->vector, - ioat_dma_do_interrupt_msix, 0, - "ioat-msix", chan); - if (err) { - for (j = 0; j < i; j++) { - msix = &device->msix_entries[j]; - chan = ioat_chan_by_index(device, j); - devm_free_irq(dev, msix->vector, chan); + /* fault on unhandled error or spurious halt */ + if (chanerr ^ err_handled || chanerr == 0) { + dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", + __func__, chanerr, err_handled); + BUG(); + } else { /* cleanup the faulty descriptor */ + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; } - goto msi; } } - intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; - device->irq_mode = IOAT_MSIX; - goto done; -msi: - err = pci_enable_msi(pdev); - if (err) - goto intx; + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); - err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, - "ioat-msi", device); - if (err) { - pci_disable_msi(pdev); - goto intx; - } - device->irq_mode = IOAT_MSI; - goto done; - -intx: - err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, - IRQF_SHARED, "ioat-intx", device); - if (err) - goto err_no_irq; - - device->irq_mode = IOAT_INTX; -done: - if (device->intr_quirk) - device->intr_quirk(device); - intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; - writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); - return 0; - -err_no_irq: - /* Disable all interrupt generation */ - writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); - device->irq_mode = IOAT_NOIRQ; - dev_err(dev, "no usable interrupts\n"); - return err; -} -EXPORT_SYMBOL(ioat_dma_setup_interrupts); + /* mark faulting descriptor as complete */ + *ioat_chan->completion = desc->txd.phys; -static void ioat_disable_interrupts(struct ioatdma_device *device) -{ - /* Disable all interrupt generation */ - writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); + 
spin_lock_bh(&ioat_chan->prep_lock); + ioat_restart_channel(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); } -int ioat_probe(struct ioatdma_device *device) +static void check_active(struct ioatdma_chan *ioat_chan) { - int err = -ENODEV; - struct dma_device *dma = &device->common; - struct pci_dev *pdev = device->pdev; - struct device *dev = &pdev->dev; - - /* DMA coherent memory pool for DMA descriptor allocations */ - device->dma_pool = pci_pool_create("dma_desc_pool", pdev, - sizeof(struct ioat_dma_descriptor), - 64, 0); - if (!device->dma_pool) { - err = -ENOMEM; - goto err_dma_pool; - } - - device->completion_pool = pci_pool_create("completion_pool", pdev, - sizeof(u64), SMP_CACHE_BYTES, - SMP_CACHE_BYTES); - - if (!device->completion_pool) { - err = -ENOMEM; - goto err_completion_pool; + if (ioat_ring_active(ioat_chan)) { + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + return; } - device->enumerate_channels(device); - - dma_cap_set(DMA_MEMCPY, dma->cap_mask); - dma->dev = &pdev->dev; + if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + else if (ioat_chan->alloc_order > ioat_get_alloc_order()) { + /* if the ring is idle, empty, and oversized try to step + * down the size + */ + reshape_ring(ioat_chan, ioat_chan->alloc_order - 1); - if (!dma->chancnt) { - dev_err(dev, "channel enumeration error\n"); - goto err_setup_interrupts; + /* keep shrinking until we get back to our minimum + * default size + */ + if (ioat_chan->alloc_order > ioat_get_alloc_order()) + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); } - err = ioat_dma_setup_interrupts(device); - if (err) - goto err_setup_interrupts; +} - err = device->self_test(device); - if (err) - goto err_self_test; +void ioat_timer_event(unsigned long data) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); + dma_addr_t phys_complete; + u64 status; - return 0; + status = ioat_chansts(ioat_chan); -err_self_test: - ioat_disable_interrupts(device); -err_setup_interrupts: - pci_pool_destroy(device->completion_pool); -err_completion_pool: - pci_pool_destroy(device->dma_pool); -err_dma_pool: - return err; -} + /* when halted due to errors check for channel + * programming errors before advancing the completion state + */ + if (is_ioat_halted(status)) { + u32 chanerr; -int ioat_register(struct ioatdma_device *device) -{ - int err = dma_async_device_register(&device->common); + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", + __func__, chanerr); + if (test_bit(IOAT_RUN, &ioat_chan->state)) + BUG_ON(is_ioat_bug(chanerr)); + else /* we never got off the ground */ + return; + } - if (err) { - ioat_disable_interrupts(device); - pci_pool_destroy(device->completion_pool); - pci_pool_destroy(device->dma_pool); + /* if we haven't made progress and we have already + * acknowledged a pending completion once, then be more + * forceful with a restart + */ + spin_lock_bh(&ioat_chan->cleanup_lock); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); + else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { + spin_lock_bh(&ioat_chan->prep_lock); + ioat_restart_channel(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + return; + } else { + set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); } - return err; -} -/* ioat1_intr_quirk - fix 
up dma ctrl register to enable / disable msi */ -static void ioat1_intr_quirk(struct ioatdma_device *device) -{ - struct pci_dev *pdev = device->pdev; - u32 dmactrl; - - pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); - if (pdev->msi_enabled) - dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; - else - dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN; - pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); + if (ioat_ring_active(ioat_chan)) + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + else { + spin_lock_bh(&ioat_chan->prep_lock); + check_active(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); + } + spin_unlock_bh(&ioat_chan->cleanup_lock); } -static ssize_t ring_size_show(struct dma_chan *c, char *page) +enum dma_status +ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) { - struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + enum dma_status ret; - return sprintf(page, "%d\n", ioat->desccount); -} -static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); + ret = dma_cookie_status(c, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; -static ssize_t ring_active_show(struct dma_chan *c, char *page) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(c); + ioat_cleanup(ioat_chan); - return sprintf(page, "%d\n", ioat->active); + return dma_cookie_status(c, cookie, txstate); } -static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); -static ssize_t cap_show(struct dma_chan *c, char *page) +static int ioat_irq_reinit(struct ioatdma_device *ioat_dma) { - struct dma_device *dma = c->device; + struct pci_dev *pdev = ioat_dma->pdev; + int irq = pdev->irq, i; - return sprintf(page, "copy%s%s%s%s%s\n", - dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", - dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", - dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", - dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", - dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? 
" intr" : ""); + if (!is_bwd_ioat(pdev)) + return 0; -} -struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap); - -static ssize_t version_show(struct dma_chan *c, char *page) -{ - struct dma_device *dma = c->device; - struct ioatdma_device *device = to_ioatdma_device(dma); + switch (ioat_dma->irq_mode) { + case IOAT_MSIX: + for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) { + struct msix_entry *msix = &ioat_dma->msix_entries[i]; + struct ioatdma_chan *ioat_chan; - return sprintf(page, "%d.%d\n", - device->version >> 4, device->version & 0xf); -} -struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version); - -static struct attribute *ioat1_attrs[] = { - &ring_size_attr.attr, - &ring_active_attr.attr, - &ioat_cap_attr.attr, - &ioat_version_attr.attr, - NULL, -}; - -static ssize_t -ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page) -{ - struct ioat_sysfs_entry *entry; - struct ioat_chan_common *chan; + ioat_chan = ioat_chan_by_index(ioat_dma, i); + devm_free_irq(&pdev->dev, msix->vector, ioat_chan); + } - entry = container_of(attr, struct ioat_sysfs_entry, attr); - chan = container_of(kobj, struct ioat_chan_common, kobj); + pci_disable_msix(pdev); + break; + case IOAT_MSI: + pci_disable_msi(pdev); + /* fall through */ + case IOAT_INTX: + devm_free_irq(&pdev->dev, irq, ioat_dma); + break; + default: + return 0; + } + ioat_dma->irq_mode = IOAT_NOIRQ; - if (!entry->show) - return -EIO; - return entry->show(&chan->common, page); + return ioat_dma_setup_interrupts(ioat_dma); } -const struct sysfs_ops ioat_sysfs_ops = { - .show = ioat_attr_show, -}; - -static struct kobj_type ioat1_ktype = { - .sysfs_ops = &ioat_sysfs_ops, - .default_attrs = ioat1_attrs, -}; - -void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type) +int ioat_reset_hw(struct ioatdma_chan *ioat_chan) { - struct dma_device *dma = &device->common; - struct dma_chan *c; + /* throw away whatever the channel was doing and get it + * initialized, with ioat3 specific workarounds + */ + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct pci_dev *pdev = ioat_dma->pdev; + u32 chanerr; + u16 dev_id; + int err; + + ioat_quiesce(ioat_chan, msecs_to_jiffies(100)); - list_for_each_entry(c, &dma->channels, device_node) { - struct ioat_chan_common *chan = to_chan_common(c); - struct kobject *parent = &c->dev->device.kobj; - int err; + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata"); + if (ioat_dma->version < IOAT_VER_3_3) { + /* clear any pending errors */ + err = pci_read_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); if (err) { - dev_warn(to_dev(chan), - "sysfs init error (%d), continuing...\n", err); - kobject_put(&chan->kobj); - set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state); + dev_err(&pdev->dev, + "channel error register unreachable\n"); + return err; } - } -} + pci_write_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, chanerr); -void ioat_kobject_del(struct ioatdma_device *device) -{ - struct dma_device *dma = &device->common; - struct dma_chan *c; - - list_for_each_entry(c, &dma->channels, device_node) { - struct ioat_chan_common *chan = to_chan_common(c); - - if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) { - kobject_del(&chan->kobj); - kobject_put(&chan->kobj); + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, 
IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { + pci_write_config_dword(pdev, + IOAT_PCI_DMAUNCERRSTS_OFFSET, + 0x10); } } -} -int ioat1_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - int err; + err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200)); + if (!err) + err = ioat_irq_reinit(ioat_dma); - device->intr_quirk = ioat1_intr_quirk; - device->enumerate_channels = ioat1_enumerate_channels; - device->self_test = ioat_dma_self_test; - device->timer_fn = ioat1_timer_event; - device->cleanup_fn = ioat1_cleanup_event; - dma = &device->common; - dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; - dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; - dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; - dma->device_free_chan_resources = ioat1_dma_free_chan_resources; - dma->device_tx_status = ioat_dma_tx_status; - - err = ioat_probe(device); - if (err) - return err; - err = ioat_register(device); if (err) - return err; - ioat_kobject_add(device, &ioat1_ktype); - - if (dca) - device->dca = ioat_dca_init(pdev, device->reg_base); + dev_err(&pdev->dev, "Failed to reset: %d\n", err); return err; } - -void ioat_dma_remove(struct ioatdma_device *device) -{ - struct dma_device *dma = &device->common; - - ioat_disable_interrupts(device); - - ioat_kobject_del(device); - - dma_async_device_unregister(dma); - - pci_pool_destroy(device->dma_pool); - pci_pool_destroy(device->completion_pool); - - INIT_LIST_HEAD(&dma->channels); -} diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 30f5c7eed..1bc084986 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -18,26 +18,32 @@ #define IOATDMA_H #include <linux/dmaengine.h> -#include "hw.h" -#include "registers.h" #include <linux/init.h> #include <linux/dmapool.h> #include <linux/cache.h> #include <linux/pci_ids.h> -#include <net/tcp.h> +#include <linux/circ_buf.h> +#include <linux/interrupt.h> +#include "registers.h" +#include "hw.h" #define IOAT_DMA_VERSION "4.00" -#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 #define IOAT_DMA_DCA_ANY_CPU ~0 -#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) -#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) -#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) -#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) -#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev) +#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev) +#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev) +#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev) + +#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80) -#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) +/* ioat hardware assumes at least two sources for raid operations */ +#define src_cnt_to_sw(x) ((x) + 2) +#define src_cnt_to_hw(x) ((x) - 2) +#define ndest_to_sw(x) ((x) + 1) +#define ndest_to_hw(x) ((x) - 1) +#define src16_cnt_to_sw(x) ((x) + 9) +#define src16_cnt_to_hw(x) ((x) - 9) /* * workaround for IOAT ver.3.0 null descriptor issue @@ -57,19 +63,15 @@ enum ioat_irq_mode { * @pdev: PCI-Express device * @reg_base: MMIO register space base address * @dma_pool: for allocating DMA descriptors - * @common: embedded struct dma_device + * @completion_pool: DMA buffers for completion ops + * @sed_hw_pool: DMA super descriptor pools + * @dma_dev: embedded struct dma_device * 
@version: version of ioatdma device * @msix_entries: irq handlers * @idx: per channel data * @dca: direct cache access context - * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) - * @enumerate_channels: hw version specific channel enumeration - * @reset_hw: hw version specific channel (re)initialization - * @cleanup_fn: select between the v2 and v3 cleanup routines - * @timer_fn: select between the v2 and v3 timer watchdog routines - * @self_test: hardware version specific self test for each supported op type - * - * Note: the v3 cleanup routine supports raid operations + * @irq_mode: interrupt mode (INTX, MSI, MSIX) + * @cap: read DMA capabilities register */ struct ioatdma_device { struct pci_dev *pdev; @@ -78,28 +80,21 @@ struct ioatdma_device { struct pci_pool *completion_pool; #define MAX_SED_POOLS 5 struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; - struct dma_device common; + struct dma_device dma_dev; u8 version; struct msix_entry msix_entries[4]; - struct ioat_chan_common *idx[4]; + struct ioatdma_chan *idx[4]; struct dca_provider *dca; enum ioat_irq_mode irq_mode; u32 cap; - void (*intr_quirk)(struct ioatdma_device *device); - int (*enumerate_channels)(struct ioatdma_device *device); - int (*reset_hw)(struct ioat_chan_common *chan); - void (*cleanup_fn)(unsigned long data); - void (*timer_fn)(unsigned long data); - int (*self_test)(struct ioatdma_device *device); }; -struct ioat_chan_common { - struct dma_chan common; +struct ioatdma_chan { + struct dma_chan dma_chan; void __iomem *reg_base; dma_addr_t last_completion; spinlock_t cleanup_lock; unsigned long state; - #define IOAT_COMPLETION_PENDING 0 #define IOAT_COMPLETION_ACK 1 #define IOAT_RESET_PENDING 2 #define IOAT_KOBJ_INIT_FAIL 3 @@ -110,11 +105,32 @@ struct ioat_chan_common { #define COMPLETION_TIMEOUT msecs_to_jiffies(100) #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) - struct ioatdma_device *device; + struct ioatdma_device *ioat_dma; dma_addr_t completion_dma; u64 *completion; struct tasklet_struct cleanup_task; struct kobject kobj; + +/* ioat v2 / v3 channel attributes + * @xfercap_log; log2 of channel max transfer length (for fast division) + * @head: allocated index + * @issued: hardware notification point + * @tail: cleanup index + * @dmacount: identical to 'head' except for occasionally resetting to zero + * @alloc_order: log2 of the number of allocated descriptors + * @produce: number of descriptors to produce at submit time + * @ring: software ring buffer implementation of hardware ring + * @prep_lock: serializes descriptor preparation (producers) + */ + size_t xfercap_log; + u16 head; + u16 issued; + u16 tail; + u16 dmacount; + u16 alloc_order; + u16 produce; + struct ioat_ring_ent **ring; + spinlock_t prep_lock; }; struct ioat_sysfs_entry { @@ -123,28 +139,11 @@ struct ioat_sysfs_entry { }; /** - * struct ioat_dma_chan - internal representation of a DMA channel - */ -struct ioat_dma_chan { - struct ioat_chan_common base; - - size_t xfercap; /* XFERCAP register value expanded out */ - - spinlock_t desc_lock; - struct list_head free_desc; - struct list_head used_desc; - - int pending; - u16 desccount; - u16 active; -}; - -/** * struct ioat_sed_ent - wrapper around super extended hardware descriptor * @hw: hardware SED - * @sed_dma: dma address for the SED - * @list: list member + * @dma: dma address for the SED * @parent: point to the dma descriptor that's the parent + * @hw_pool: descriptor pool index */ struct ioat_sed_ent { struct ioat_sed_raw_descriptor *hw; @@ 
-153,39 +152,57 @@ struct ioat_sed_ent { unsigned int hw_pool; }; -static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) -{ - return container_of(c, struct ioat_chan_common, common); -} - -static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) -{ - struct ioat_chan_common *chan = to_chan_common(c); - - return container_of(chan, struct ioat_dma_chan, base); -} - -/* wrapper around hardware descriptor format + additional software fields */ - /** - * struct ioat_desc_sw - wrapper around hardware descriptor + * struct ioat_ring_ent - wrapper around hardware descriptor * @hw: hardware DMA descriptor (for memcpy) - * @node: this descriptor will either be on the free list, - * or attached to a transaction list (tx_list) + * @xor: hardware xor descriptor + * @xor_ex: hardware xor extension descriptor + * @pq: hardware pq descriptor + * @pq_ex: hardware pq extension descriptor + * @pqu: hardware pq update descriptor + * @raw: hardware raw (un-typed) descriptor * @txd: the generic software descriptor for all engines + * @len: total transaction length for unmap + * @result: asynchronous result of validate operations * @id: identifier for debug + * @sed: pointer to super extended descriptor sw desc */ -struct ioat_desc_sw { - struct ioat_dma_descriptor *hw; - struct list_head node; + +struct ioat_ring_ent { + union { + struct ioat_dma_descriptor *hw; + struct ioat_xor_descriptor *xor; + struct ioat_xor_ext_descriptor *xor_ex; + struct ioat_pq_descriptor *pq; + struct ioat_pq_ext_descriptor *pq_ex; + struct ioat_pq_update_descriptor *pqu; + struct ioat_raw_descriptor *raw; + }; size_t len; - struct list_head tx_list; struct dma_async_tx_descriptor txd; + enum sum_check_flags *result; #ifdef DEBUG int id; #endif + struct ioat_sed_ent *sed; }; +extern const struct sysfs_ops ioat_sysfs_ops; +extern struct ioat_sysfs_entry ioat_version_attr; +extern struct ioat_sysfs_entry ioat_cap_attr; +extern int ioat_pending_level; +extern int ioat_ring_alloc_order; +extern struct kobj_type ioat_ktype; +extern struct kmem_cache *ioat_cache; +extern int ioat_ring_max_alloc_order; +extern struct kmem_cache *ioat_sed_cache; + +static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c) +{ + return container_of(c, struct ioatdma_chan, dma_chan); +} + +/* wrapper around hardware descriptor format + additional software fields */ #ifdef DEBUG #define set_desc_id(desc, i) ((desc)->id = (i)) #define desc_id(desc) ((desc)->id) @@ -195,10 +212,10 @@ struct ioat_desc_sw { #endif static inline void -__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, +__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw, struct dma_async_tx_descriptor *tx, int id) { - struct device *dev = to_dev(chan); + struct device *dev = to_dev(ioat_chan); dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id, @@ -208,25 +225,25 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, } #define dump_desc_dbg(c, d) \ - ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; }) + ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; }) -static inline struct ioat_chan_common * -ioat_chan_by_index(struct ioatdma_device *device, int index) +static inline struct ioatdma_chan * +ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index) { - return device->idx[index]; + return ioat_dma->idx[index]; } -static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) 
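The channel-status helpers that follow are normally used together: take one CHANSTS snapshot (low half first, so the chipset latches the high half for the second read), test the state bits, and pull the completed-descriptor address out of the same value. A small usage sketch under those assumptions; inspect_channel() is an illustrative name only, and the helpers it calls are the ones defined below in this header:

/* minimal usage sketch: one status snapshot, then decode it with the
 * is_ioat_*() predicates and ioat_chansts_to_addr() defined below
 */
static void inspect_channel(struct ioatdma_chan *ioat_chan)
{
	u64 status = ioat_chansts(ioat_chan);

	if (is_ioat_halted(status))
		dev_err(to_dev(ioat_chan), "halted, chanerr %#x\n",
			ioat_chanerr(ioat_chan));
	else if (is_ioat_active(status) || is_ioat_idle(status))
		dev_dbg(to_dev(ioat_chan), "last completion %#llx\n",
			(unsigned long long)ioat_chansts_to_addr(status));
}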
+static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; u64 status; u32 status_lo; /* We need to read the low address first as this causes the * chipset to latch the upper bits for the subsequent read */ - status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); - status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); + status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); + status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); status <<= 32; status |= status_lo; @@ -235,16 +252,16 @@ static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) #if BITS_PER_LONG == 64 -static inline u64 ioat_chansts(struct ioat_chan_common *chan) +static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; u64 status; /* With IOAT v3.3 the status register is 64bit. */ if (ver >= IOAT_VER_3_3) - status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); + status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); else - status = ioat_chansts_32(chan); + status = ioat_chansts_32(ioat_chan); return status; } @@ -253,56 +270,41 @@ static inline u64 ioat_chansts(struct ioat_chan_common *chan) #define ioat_chansts ioat_chansts_32 #endif -static inline void ioat_start(struct ioat_chan_common *chan) -{ - u8 ver = chan->device->version; - - writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); -} - static inline u64 ioat_chansts_to_addr(u64 status) { return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; } -static inline u32 ioat_chanerr(struct ioat_chan_common *chan) +static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan) { - return readl(chan->reg_base + IOAT_CHANERR_OFFSET); + return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); } -static inline void ioat_suspend(struct ioat_chan_common *chan) +static inline void ioat_suspend(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; - writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); + writeb(IOAT_CHANCMD_SUSPEND, + ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); } -static inline void ioat_reset(struct ioat_chan_common *chan) +static inline void ioat_reset(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; - writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); + writeb(IOAT_CHANCMD_RESET, + ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); } -static inline bool ioat_reset_pending(struct ioat_chan_common *chan) +static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan) { - u8 ver = chan->device->version; + u8 ver = ioat_chan->ioat_dma->version; u8 cmd; - cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); + cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET; } -static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) -{ - struct ioat_chan_common *chan = &ioat->base; - - writel(addr & 0x00000000FFFFFFFF, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(addr >> 32, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); -} - static inline bool is_ioat_active(unsigned long status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); @@ -329,24 +331,111 @@ static inline bool is_ioat_bug(unsigned long err) return !!err; } -int ioat_probe(struct 
ioatdma_device *device); -int ioat_register(struct ioatdma_device *device); -int ioat1_dma_probe(struct ioatdma_device *dev, int dca); -int ioat_dma_self_test(struct ioatdma_device *device); -void ioat_dma_remove(struct ioatdma_device *device); +#define IOAT_MAX_ORDER 16 +#define ioat_get_alloc_order() \ + (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) +#define ioat_get_max_alloc_order() \ + (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER)) + +static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan) +{ + return 1 << ioat_chan->alloc_order; +} + +/* count of descriptors in flight with the engine */ +static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan) +{ + return CIRC_CNT(ioat_chan->head, ioat_chan->tail, + ioat_ring_size(ioat_chan)); +} + +/* count of descriptors pending submission to hardware */ +static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan) +{ + return CIRC_CNT(ioat_chan->head, ioat_chan->issued, + ioat_ring_size(ioat_chan)); +} + +static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan) +{ + return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan); +} + +static inline u16 +ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len) +{ + u16 num_descs = len >> ioat_chan->xfercap_log; + + num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1)); + return num_descs; +} + +static inline struct ioat_ring_ent * +ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx) +{ + return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)]; +} + +static inline void +ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr) +{ + writel(addr & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(addr >> 32, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); +} + +/* IOAT Prep functions */ +struct dma_async_tx_descriptor * +ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags); +struct dma_async_tx_descriptor * +ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags); + +/* IOAT Operation functions */ +irqreturn_t ioat_dma_do_interrupt(int irq, void *data); +irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data); +struct ioat_ring_ent ** +ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags); +void ioat_start_null_desc(struct ioatdma_chan *ioat_chan); +void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan 
*chan); +int ioat_reset_hw(struct ioatdma_chan *ioat_chan); +enum dma_status +ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate); +void ioat_cleanup_event(unsigned long data); +void ioat_timer_event(unsigned long data); +int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); +void ioat_issue_pending(struct dma_chan *chan); +void ioat_timer_event(unsigned long data); + +/* IOAT Init functions */ +bool is_bwd_ioat(struct pci_dev *pdev); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); -dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan); -void ioat_init_channel(struct ioatdma_device *device, - struct ioat_chan_common *chan, int idx); -enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate); -bool ioat_cleanup_preamble(struct ioat_chan_common *chan, - dma_addr_t *phys_complete); -void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); -void ioat_kobject_del(struct ioatdma_device *device); -int ioat_dma_setup_interrupts(struct ioatdma_device *device); -void ioat_stop(struct ioat_chan_common *chan); -extern const struct sysfs_ops ioat_sysfs_ops; -extern struct ioat_sysfs_entry ioat_version_attr; -extern struct ioat_sysfs_entry ioat_cap_attr; +void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); +void ioat_kobject_del(struct ioatdma_device *ioat_dma); +int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); +void ioat_stop(struct ioatdma_chan *ioat_chan); #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c deleted file mode 100644 index 69c7dfcad..000000000 --- a/drivers/dma/ioat/dma_v2.c +++ /dev/null @@ -1,916 +0,0 @@ -/* - * Intel I/OAT DMA Linux driver - * Copyright(c) 2004 - 2009 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - */ - -/* - * This driver supports an Intel I/OAT DMA engine (versions >= 2), which - * does asynchronous data movement and checksumming operations. 
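The ring bookkeeping added to dma.h above is easiest to see with concrete numbers: head counts descriptors handed to producers, issued marks how far the hardware has been told to advance, tail trails behind as completions are reaped, and CIRC_CNT() works because the ring size is a power of two. A stand-alone sketch of that arithmetic, with CIRC_CNT() re-defined locally to mirror <linux/circ_buf.h> and all of the numbers invented for illustration:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* local mirror of the kernel's CIRC_CNT(); valid because size is a power of two */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))

int main(void)
{
	unsigned int alloc_order = 4;			/* ring of 2^4 = 16 descriptors */
	unsigned int size = 1u << alloc_order;
	uint16_t head = 21, issued = 18, tail = 13;	/* u16 indices, wrapping like the driver's */
	unsigned int xfercap_log = 20;			/* 1 MB max transfer per descriptor */
	size_t len = (3u << 20) + 4096;			/* a 3 MB + 4 KB copy request */

	unsigned int active  = CIRC_CNT(head, tail, size);	/* in flight: 8 */
	unsigned int pending = CIRC_CNT(head, issued, size);	/* prepped, not yet issued: 3 */
	unsigned int space   = size - active;			/* free slots: 8 */

	/* same computation as ioat_xferlen_to_descs(): full chunks plus a remainder */
	unsigned int descs = (len >> xfercap_log) +
			     !!(len & ((1u << xfercap_log) - 1));	/* 3 + 1 = 4 */

	printf("active=%u pending=%u space=%u descs_needed=%u\n",
	       active, pending, space, descs);
	return 0;
}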
- */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/interrupt.h> -#include <linux/dmaengine.h> -#include <linux/delay.h> -#include <linux/dma-mapping.h> -#include <linux/workqueue.h> -#include <linux/prefetch.h> -#include <linux/i7300_idle.h> -#include "dma.h" -#include "dma_v2.h" -#include "registers.h" -#include "hw.h" - -#include "../dmaengine.h" - -int ioat_ring_alloc_order = 8; -module_param(ioat_ring_alloc_order, int, 0644); -MODULE_PARM_DESC(ioat_ring_alloc_order, - "ioat2+: allocate 2^n descriptors per channel" - " (default: 8 max: 16)"); -static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; -module_param(ioat_ring_max_alloc_order, int, 0644); -MODULE_PARM_DESC(ioat_ring_max_alloc_order, - "ioat2+: upper limit for ring size (default: 16)"); - -void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - - ioat->dmacount += ioat2_ring_pending(ioat); - ioat->issued = ioat->head; - writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - dev_dbg(to_dev(chan), - "%s: head: %#x tail: %#x issued: %#x count: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); -} - -void ioat2_issue_pending(struct dma_chan *c) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - - if (ioat2_ring_pending(ioat)) { - spin_lock_bh(&ioat->prep_lock); - __ioat2_issue_pending(ioat); - spin_unlock_bh(&ioat->prep_lock); - } -} - -/** - * ioat2_update_pending - log pending descriptors - * @ioat: ioat2+ channel - * - * Check if the number of unsubmitted descriptors has exceeded the - * watermark. Called with prep_lock held - */ -static void ioat2_update_pending(struct ioat2_dma_chan *ioat) -{ - if (ioat2_ring_pending(ioat) > ioat_pending_level) - __ioat2_issue_pending(ioat); -} - -static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) -{ - struct ioat_ring_ent *desc; - struct ioat_dma_descriptor *hw; - - if (ioat2_ring_space(ioat) < 1) { - dev_err(to_dev(&ioat->base), - "Unable to start null desc - ring full\n"); - return; - } - - dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued); - desc = ioat2_get_ring_ent(ioat, ioat->head); - - hw = desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = 1; - hw->ctl_f.compl_write = 1; - /* set size to non-zero value (channel returns error when size is 0) */ - hw->size = NULL_DESC_BUFFER_SIZE; - hw->src_addr = 0; - hw->dst_addr = 0; - async_tx_ack(&desc->txd); - ioat2_set_chainaddr(ioat, desc->txd.phys); - dump_desc_dbg(ioat, desc); - wmb(); - ioat->head += 1; - __ioat2_issue_pending(ioat); -} - -static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) -{ - spin_lock_bh(&ioat->prep_lock); - __ioat2_start_null_desc(ioat); - spin_unlock_bh(&ioat->prep_lock); -} - -static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) -{ - struct ioat_chan_common *chan = &ioat->base; - struct dma_async_tx_descriptor *tx; - struct ioat_ring_ent *desc; - bool seen_current = false; - u16 active; - int idx = ioat->tail, i; - - dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued); - - active = ioat2_ring_active(ioat); - for (i = 0; i < active && !seen_current; i++) { - smp_read_barrier_depends(); - prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); - desc = ioat2_get_ring_ent(ioat, idx + i); - tx = &desc->txd; - dump_desc_dbg(ioat, desc); - if (tx->cookie) { - 
dma_descriptor_unmap(tx); - dma_cookie_complete(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - - if (tx->phys == phys_complete) - seen_current = true; - } - smp_mb(); /* finish all descriptor reads before incrementing tail */ - ioat->tail = idx + i; - BUG_ON(active && !seen_current); /* no active descs have written a completion? */ - - chan->last_completion = phys_complete; - if (active - i == 0) { - dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", - __func__); - clear_bit(IOAT_COMPLETION_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - } -} - -/** - * ioat2_cleanup - clean finished descriptors (advance tail pointer) - * @chan: ioat channel to be cleaned up - */ -static void ioat2_cleanup(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - - spin_lock_bh(&chan->cleanup_lock); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - spin_unlock_bh(&chan->cleanup_lock); -} - -void ioat2_cleanup_event(unsigned long data) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - - ioat2_cleanup(ioat); - if (!test_bit(IOAT_RUN, &chan->state)) - return; - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); -} - -void __ioat2_restart_chan(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - - /* set the tail to be re-issued */ - ioat->issued = ioat->tail; - ioat->dmacount = 0; - set_bit(IOAT_COMPLETION_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - - dev_dbg(to_dev(chan), - "%s: head: %#x tail: %#x issued: %#x count: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); - - if (ioat2_ring_pending(ioat)) { - struct ioat_ring_ent *desc; - - desc = ioat2_get_ring_ent(ioat, ioat->tail); - ioat2_set_chainaddr(ioat, desc->txd.phys); - __ioat2_issue_pending(ioat); - } else - __ioat2_start_null_desc(ioat); -} - -int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) -{ - unsigned long end = jiffies + tmo; - int err = 0; - u32 status; - - status = ioat_chansts(chan); - if (is_ioat_active(status) || is_ioat_idle(status)) - ioat_suspend(chan); - while (is_ioat_active(status) || is_ioat_idle(status)) { - if (tmo && time_after(jiffies, end)) { - err = -ETIMEDOUT; - break; - } - status = ioat_chansts(chan); - cpu_relax(); - } - - return err; -} - -int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo) -{ - unsigned long end = jiffies + tmo; - int err = 0; - - ioat_reset(chan); - while (ioat_reset_pending(chan)) { - if (end && time_after(jiffies, end)) { - err = -ETIMEDOUT; - break; - } - cpu_relax(); - } - - return err; -} - -static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - - ioat2_quiesce(chan, 0); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - - __ioat2_restart_chan(ioat); -} - -static void check_active(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - - if (ioat2_ring_active(ioat)) { - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - return; - } - - if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state)) - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - else if (ioat->alloc_order > ioat_get_alloc_order()) { - /* if the ring is idle, empty, and oversized try to step - * down the 
size - */ - reshape_ring(ioat, ioat->alloc_order - 1); - - /* keep shrinking until we get back to our minimum - * default size - */ - if (ioat->alloc_order > ioat_get_alloc_order()) - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - } - -} - -void ioat2_timer_event(unsigned long data) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - u64 status; - - status = ioat_chansts(chan); - - /* when halted due to errors check for channel - * programming errors before advancing the completion state - */ - if (is_ioat_halted(status)) { - u32 chanerr; - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - dev_err(to_dev(chan), "%s: Channel halted (%x)\n", - __func__, chanerr); - if (test_bit(IOAT_RUN, &chan->state)) - BUG_ON(is_ioat_bug(chanerr)); - else /* we never got off the ground */ - return; - } - - /* if we haven't made progress and we have already - * acknowledged a pending completion once, then be more - * forceful with a restart - */ - spin_lock_bh(&chan->cleanup_lock); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { - spin_lock_bh(&ioat->prep_lock); - ioat2_restart_channel(ioat); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - return; - } else { - set_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - } - - - if (ioat2_ring_active(ioat)) - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - else { - spin_lock_bh(&ioat->prep_lock); - check_active(ioat); - spin_unlock_bh(&ioat->prep_lock); - } - spin_unlock_bh(&chan->cleanup_lock); -} - -static int ioat2_reset_hw(struct ioat_chan_common *chan) -{ - /* throw away whatever the channel was doing and get it initialized */ - u32 chanerr; - - ioat2_quiesce(chan, msecs_to_jiffies(100)); - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - - return ioat2_reset_sync(chan, msecs_to_jiffies(200)); -} - -/** - * ioat2_enumerate_channels - find and initialize the device's channels - * @device: the device to be enumerated - */ -int ioat2_enumerate_channels(struct ioatdma_device *device) -{ - struct ioat2_dma_chan *ioat; - struct device *dev = &device->pdev->dev; - struct dma_device *dma = &device->common; - u8 xfercap_log; - int i; - - INIT_LIST_HEAD(&dma->channels); - dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); - dma->chancnt &= 0x1f; /* bits [4:0] valid */ - if (dma->chancnt > ARRAY_SIZE(device->idx)) { - dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", - dma->chancnt, ARRAY_SIZE(device->idx)); - dma->chancnt = ARRAY_SIZE(device->idx); - } - xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); - xfercap_log &= 0x1f; /* bits [4:0] valid */ - if (xfercap_log == 0) - return 0; - dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); - - /* FIXME which i/oat version is i7300? 
*/ -#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL - if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) - dma->chancnt--; -#endif - for (i = 0; i < dma->chancnt; i++) { - ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); - if (!ioat) - break; - - ioat_init_channel(device, &ioat->base, i); - ioat->xfercap_log = xfercap_log; - spin_lock_init(&ioat->prep_lock); - if (device->reset_hw(&ioat->base)) { - i = 0; - break; - } - } - dma->chancnt = i; - return i; -} - -static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) -{ - struct dma_chan *c = tx->chan; - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - dma_cookie_t cookie; - - cookie = dma_cookie_assign(tx); - dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); - - if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state)) - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - - /* make descriptor updates visible before advancing ioat->head, - * this is purposefully not smp_wmb() since we are also - * publishing the descriptor updates to a dma device - */ - wmb(); - - ioat->head += ioat->produce; - - ioat2_update_pending(ioat); - spin_unlock_bh(&ioat->prep_lock); - - return cookie; -} - -static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) -{ - struct ioat_dma_descriptor *hw; - struct ioat_ring_ent *desc; - struct ioatdma_device *dma; - dma_addr_t phys; - - dma = to_ioatdma_device(chan->device); - hw = pci_pool_alloc(dma->dma_pool, flags, &phys); - if (!hw) - return NULL; - memset(hw, 0, sizeof(*hw)); - - desc = kmem_cache_zalloc(ioat2_cache, flags); - if (!desc) { - pci_pool_free(dma->dma_pool, hw, phys); - return NULL; - } - - dma_async_tx_descriptor_init(&desc->txd, chan); - desc->txd.tx_submit = ioat2_tx_submit_unlock; - desc->hw = hw; - desc->txd.phys = phys; - return desc; -} - -static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) -{ - struct ioatdma_device *dma; - - dma = to_ioatdma_device(chan->device); - pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); - kmem_cache_free(ioat2_cache, desc); -} - -static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) -{ - struct ioat_ring_ent **ring; - int descs = 1 << order; - int i; - - if (order > ioat_get_max_alloc_order()) - return NULL; - - /* allocate the array to hold the software ring */ - ring = kcalloc(descs, sizeof(*ring), flags); - if (!ring) - return NULL; - for (i = 0; i < descs; i++) { - ring[i] = ioat2_alloc_ring_ent(c, flags); - if (!ring[i]) { - while (i--) - ioat2_free_ring_ent(ring[i], c); - kfree(ring); - return NULL; - } - set_desc_id(ring[i], i); - } - - /* link descs */ - for (i = 0; i < descs-1; i++) { - struct ioat_ring_ent *next = ring[i+1]; - struct ioat_dma_descriptor *hw = ring[i]->hw; - - hw->next = next->txd.phys; - } - ring[i]->hw->next = ring[0]->txd.phys; - - return ring; -} - -void ioat2_free_chan_resources(struct dma_chan *c); - -/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring - * @chan: channel to be initialized - */ -int ioat2_alloc_chan_resources(struct dma_chan *c) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioat_ring_ent **ring; - u64 status; - int order; - int i = 0; - - /* have we already been set up? 
*/ - if (ioat->ring) - return 1 << ioat->alloc_order; - - /* Setup register to interrupt and write completion status on error */ - writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); - - /* allocate a completion writeback area */ - /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion = pci_pool_alloc(chan->device->completion_pool, - GFP_KERNEL, &chan->completion_dma); - if (!chan->completion) - return -ENOMEM; - - memset(chan->completion, 0, sizeof(*chan->completion)); - writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) chan->completion_dma) >> 32, - chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - - order = ioat_get_alloc_order(); - ring = ioat2_alloc_ring(c, order, GFP_KERNEL); - if (!ring) - return -ENOMEM; - - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->prep_lock); - ioat->ring = ring; - ioat->head = 0; - ioat->issued = 0; - ioat->tail = 0; - ioat->alloc_order = order; - set_bit(IOAT_RUN, &chan->state); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - - ioat2_start_null_desc(ioat); - - /* check that we got off the ground */ - do { - udelay(1); - status = ioat_chansts(chan); - } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); - - if (is_ioat_active(status) || is_ioat_idle(status)) { - return 1 << ioat->alloc_order; - } else { - u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - - dev_WARN(to_dev(chan), - "failed to start channel chanerr: %#x\n", chanerr); - ioat2_free_chan_resources(c); - return -EFAULT; - } -} - -bool reshape_ring(struct ioat2_dma_chan *ioat, int order) -{ - /* reshape differs from normal ring allocation in that we want - * to allocate a new software ring while only - * extending/truncating the hardware ring - */ - struct ioat_chan_common *chan = &ioat->base; - struct dma_chan *c = &chan->common; - const u32 curr_size = ioat2_ring_size(ioat); - const u16 active = ioat2_ring_active(ioat); - const u32 new_size = 1 << order; - struct ioat_ring_ent **ring; - u16 i; - - if (order > ioat_get_max_alloc_order()) - return false; - - /* double check that we have at least 1 free descriptor */ - if (active == curr_size) - return false; - - /* when shrinking, verify that we can hold the current active - * set in the new ring - */ - if (active >= new_size) - return false; - - /* allocate the array to hold the software ring */ - ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT); - if (!ring) - return false; - - /* allocate/trim descriptors as needed */ - if (new_size > curr_size) { - /* copy current descriptors to the new ring */ - for (i = 0; i < curr_size; i++) { - u16 curr_idx = (ioat->tail+i) & (curr_size-1); - u16 new_idx = (ioat->tail+i) & (new_size-1); - - ring[new_idx] = ioat->ring[curr_idx]; - set_desc_id(ring[new_idx], new_idx); - } - - /* add new descriptors to the ring */ - for (i = curr_size; i < new_size; i++) { - u16 new_idx = (ioat->tail+i) & (new_size-1); - - ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT); - if (!ring[new_idx]) { - while (i--) { - u16 new_idx = (ioat->tail+i) & (new_size-1); - - ioat2_free_ring_ent(ring[new_idx], c); - } - kfree(ring); - return false; - } - set_desc_id(ring[new_idx], new_idx); - } - - /* hw link new descriptors */ - for (i = curr_size-1; i < new_size; i++) { - u16 new_idx = (ioat->tail+i) & (new_size-1); - struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)]; - struct ioat_dma_descriptor *hw = ring[new_idx]->hw; - - hw->next = 
next->txd.phys; - } - } else { - struct ioat_dma_descriptor *hw; - struct ioat_ring_ent *next; - - /* copy current descriptors to the new ring, dropping the - * removed descriptors - */ - for (i = 0; i < new_size; i++) { - u16 curr_idx = (ioat->tail+i) & (curr_size-1); - u16 new_idx = (ioat->tail+i) & (new_size-1); - - ring[new_idx] = ioat->ring[curr_idx]; - set_desc_id(ring[new_idx], new_idx); - } - - /* free deleted descriptors */ - for (i = new_size; i < curr_size; i++) { - struct ioat_ring_ent *ent; - - ent = ioat2_get_ring_ent(ioat, ioat->tail+i); - ioat2_free_ring_ent(ent, c); - } - - /* fix up hardware ring */ - hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw; - next = ring[(ioat->tail+new_size) & (new_size-1)]; - hw->next = next->txd.phys; - } - - dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", - __func__, new_size); - - kfree(ioat->ring); - ioat->ring = ring; - ioat->alloc_order = order; - - return true; -} - -/** - * ioat2_check_space_lock - verify space and grab ring producer lock - * @ioat: ioat2,3 channel (ring) to operate on - * @num_descs: allocation length - */ -int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs) -{ - struct ioat_chan_common *chan = &ioat->base; - bool retry; - - retry: - spin_lock_bh(&ioat->prep_lock); - /* never allow the last descriptor to be consumed, we need at - * least one free at all times to allow for on-the-fly ring - * resizing. - */ - if (likely(ioat2_ring_space(ioat) > num_descs)) { - dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", - __func__, num_descs, ioat->head, ioat->tail, ioat->issued); - ioat->produce = num_descs; - return 0; /* with ioat->prep_lock held */ - } - retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state); - spin_unlock_bh(&ioat->prep_lock); - - /* is another cpu already trying to expand the ring? */ - if (retry) - goto retry; - - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->prep_lock); - retry = reshape_ring(ioat, ioat->alloc_order + 1); - clear_bit(IOAT_RESHAPE_PENDING, &chan->state); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - - /* if we were able to expand the ring retry the allocation */ - if (retry) - goto retry; - - if (printk_ratelimit()) - dev_dbg(to_dev(chan), "%s: ring full! 
num_descs: %d (%x:%x:%x)\n", - __func__, num_descs, ioat->head, ioat->tail, ioat->issued); - - /* progress reclaim in the allocation failure case we may be - * called under bh_disabled so we need to trigger the timer - * event directly - */ - if (time_is_before_jiffies(chan->timer.expires) - && timer_pending(&chan->timer)) { - struct ioatdma_device *device = chan->device; - - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - device->timer_fn((unsigned long) &chan->common); - } - - return -ENOMEM; -} - -struct dma_async_tx_descriptor * -ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_dma_descriptor *hw; - struct ioat_ring_ent *desc; - dma_addr_t dst = dma_dest; - dma_addr_t src = dma_src; - size_t total_len = len; - int num_descs, idx, i; - - num_descs = ioat2_xferlen_to_descs(ioat, len); - if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) - idx = ioat->head; - else - return NULL; - i = 0; - do { - size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log); - - desc = ioat2_get_ring_ent(ioat, idx + i); - hw = desc->hw; - - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dst; - - len -= copy; - dst += copy; - src += copy; - dump_desc_dbg(ioat, desc); - } while (++i < num_descs); - - desc->txd.flags = flags; - desc->len = total_len; - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - hw->ctl_f.compl_write = 1; - dump_desc_dbg(ioat, desc); - /* we leave the channel locked to ensure in order submission */ - - return &desc->txd; -} - -/** - * ioat2_free_chan_resources - release all the descriptors - * @chan: the channel to be cleaned - */ -void ioat2_free_chan_resources(struct dma_chan *c) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *device = chan->device; - struct ioat_ring_ent *desc; - const u16 total_descs = 1 << ioat->alloc_order; - int descs; - int i; - - /* Before freeing channel resources first check - * if they have been previously allocated for this channel. 
- */ - if (!ioat->ring) - return; - - ioat_stop(chan); - device->reset_hw(chan); - - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->prep_lock); - descs = ioat2_ring_space(ioat); - dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); - for (i = 0; i < descs; i++) { - desc = ioat2_get_ring_ent(ioat, ioat->head + i); - ioat2_free_ring_ent(desc, c); - } - - if (descs < total_descs) - dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", - total_descs - descs); - - for (i = 0; i < total_descs - descs; i++) { - desc = ioat2_get_ring_ent(ioat, ioat->tail + i); - dump_desc_dbg(ioat, desc); - ioat2_free_ring_ent(desc, c); - } - - kfree(ioat->ring); - ioat->ring = NULL; - ioat->alloc_order = 0; - pci_pool_free(device->completion_pool, chan->completion, - chan->completion_dma); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - - chan->last_completion = 0; - chan->completion_dma = 0; - ioat->dmacount = 0; -} - -static ssize_t ring_size_show(struct dma_chan *c, char *page) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - - return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1); -} -static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); - -static ssize_t ring_active_show(struct dma_chan *c, char *page) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - - /* ...taken outside the lock, no need to be precise */ - return sprintf(page, "%d\n", ioat2_ring_active(ioat)); -} -static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); - -static struct attribute *ioat2_attrs[] = { - &ring_size_attr.attr, - &ring_active_attr.attr, - &ioat_cap_attr.attr, - &ioat_version_attr.attr, - NULL, -}; - -struct kobj_type ioat2_ktype = { - .sysfs_ops = &ioat_sysfs_ops, - .default_attrs = ioat2_attrs, -}; - -int ioat2_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - int err; - - device->enumerate_channels = ioat2_enumerate_channels; - device->reset_hw = ioat2_reset_hw; - device->cleanup_fn = ioat2_cleanup_event; - device->timer_fn = ioat2_timer_event; - device->self_test = ioat_dma_self_test; - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; - dma->device_issue_pending = ioat2_issue_pending; - dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; - dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_tx_status = ioat_dma_tx_status; - - err = ioat_probe(device); - if (err) - return err; - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - - ioat_kobject_add(device, &ioat2_ktype); - - if (dca) - device->dca = ioat2_dca_init(pdev, device->reg_base); - - return err; -} diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h deleted file mode 100644 index bf24ebe87..000000000 --- a/drivers/dma/ioat/dma_v2.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - */ -#ifndef IOATDMA_V2_H -#define IOATDMA_V2_H - -#include <linux/dmaengine.h> -#include <linux/circ_buf.h> -#include "dma.h" -#include "hw.h" - - -extern int ioat_pending_level; -extern int ioat_ring_alloc_order; - -/* - * workaround for IOAT ver.3.0 null descriptor issue - * (channel returns error when size is 0) - */ -#define NULL_DESC_BUFFER_SIZE 1 - -#define IOAT_MAX_ORDER 16 -#define ioat_get_alloc_order() \ - (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) -#define ioat_get_max_alloc_order() \ - (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER)) - -/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes - * @base: common ioat channel parameters - * @xfercap_log; log2 of channel max transfer length (for fast division) - * @head: allocated index - * @issued: hardware notification point - * @tail: cleanup index - * @dmacount: identical to 'head' except for occasionally resetting to zero - * @alloc_order: log2 of the number of allocated descriptors - * @produce: number of descriptors to produce at submit time - * @ring: software ring buffer implementation of hardware ring - * @prep_lock: serializes descriptor preparation (producers) - */ -struct ioat2_dma_chan { - struct ioat_chan_common base; - size_t xfercap_log; - u16 head; - u16 issued; - u16 tail; - u16 dmacount; - u16 alloc_order; - u16 produce; - struct ioat_ring_ent **ring; - spinlock_t prep_lock; -}; - -static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) -{ - struct ioat_chan_common *chan = to_chan_common(c); - - return container_of(chan, struct ioat2_dma_chan, base); -} - -static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat) -{ - return 1 << ioat->alloc_order; -} - -/* count of descriptors in flight with the engine */ -static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) -{ - return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat)); -} - -/* count of descriptors pending submission to hardware */ -static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) -{ - return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat)); -} - -static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat) -{ - return ioat2_ring_size(ioat) - ioat2_ring_active(ioat); -} - -static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) -{ - u16 num_descs = len >> ioat->xfercap_log; - - num_descs += !!(len & ((1 << ioat->xfercap_log) - 1)); - return num_descs; -} - -/** - * struct ioat_ring_ent - wrapper around hardware descriptor - * @hw: hardware DMA descriptor (for memcpy) - * @fill: hardware fill descriptor - * @xor: hardware xor descriptor - * @xor_ex: hardware xor extension descriptor - * @pq: hardware pq descriptor - * @pq_ex: hardware pq extension descriptor - * @pqu: hardware pq update descriptor - * @raw: hardware raw (un-typed) descriptor - * @txd: the generic software descriptor for all engines - * @len: total transaction length for unmap - * @result: asynchronous result of validate operations - * @id: identifier for debug - */ - -struct ioat_ring_ent { - union { - struct ioat_dma_descriptor *hw; - struct ioat_xor_descriptor *xor; - struct ioat_xor_ext_descriptor *xor_ex; - struct ioat_pq_descriptor *pq; 
- struct ioat_pq_ext_descriptor *pq_ex; - struct ioat_pq_update_descriptor *pqu; - struct ioat_raw_descriptor *raw; - }; - size_t len; - struct dma_async_tx_descriptor txd; - enum sum_check_flags *result; - #ifdef DEBUG - int id; - #endif - struct ioat_sed_ent *sed; -}; - -static inline struct ioat_ring_ent * -ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) -{ - return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)]; -} - -static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) -{ - struct ioat_chan_common *chan = &ioat->base; - - writel(addr & 0x00000000FFFFFFFF, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(addr >> 32, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); -} - -int ioat2_dma_probe(struct ioatdma_device *dev, int dca); -int ioat3_dma_probe(struct ioatdma_device *dev, int dca); -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); -int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); -int ioat2_enumerate_channels(struct ioatdma_device *device); -struct dma_async_tx_descriptor * -ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags); -void ioat2_issue_pending(struct dma_chan *chan); -int ioat2_alloc_chan_resources(struct dma_chan *c); -void ioat2_free_chan_resources(struct dma_chan *c); -void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); -bool reshape_ring(struct ioat2_dma_chan *ioat, int order); -void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); -void ioat2_cleanup_event(unsigned long data); -void ioat2_timer_event(unsigned long data); -int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); -int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); -extern struct kobj_type ioat2_ktype; -extern struct kmem_cache *ioat2_cache; -#endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c deleted file mode 100644 index 64790a45e..000000000 --- a/drivers/dma/ioat/dma_v3.c +++ /dev/null @@ -1,1717 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * BSD LICENSE - * - * Copyright(c) 2004-2009 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * Support routines for v3+ hardware - */ -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/gfp.h> -#include <linux/dmaengine.h> -#include <linux/dma-mapping.h> -#include <linux/prefetch.h> -#include "../dmaengine.h" -#include "registers.h" -#include "hw.h" -#include "dma.h" -#include "dma_v2.h" - -extern struct kmem_cache *ioat3_sed_cache; - -/* ioat hardware assumes at least two sources for raid operations */ -#define src_cnt_to_sw(x) ((x) + 2) -#define src_cnt_to_hw(x) ((x) - 2) -#define ndest_to_sw(x) ((x) + 1) -#define ndest_to_hw(x) ((x) - 1) -#define src16_cnt_to_sw(x) ((x) + 9) -#define src16_cnt_to_hw(x) ((x) - 9) - -/* provide a lookup table for setting the source address in the base or - * extended descriptor of an xor or pq descriptor - */ -static const u8 xor_idx_to_desc = 0xe0; -static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; -static const u8 pq_idx_to_desc = 0xf8; -static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2 }; -static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; -static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, - 0, 1, 2, 3, 4, 5, 6 }; - -static void ioat3_eh(struct ioat2_dma_chan *ioat); - -static void xor_set_src(struct ioat_raw_descriptor *descs[2], - dma_addr_t addr, u32 offset, int idx) -{ - struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; - - raw->field[xor_idx_to_field[idx]] = addr + offset; -} - -static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) -{ - struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; - - return raw->field[pq_idx_to_field[idx]]; -} - -static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) -{ - struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; - - return raw->field[pq16_idx_to_field[idx]]; -} - -static void pq_set_src(struct ioat_raw_descriptor *descs[2], - dma_addr_t addr, u32 offset, u8 coef, int idx) -{ - struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; - struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; - - raw->field[pq_idx_to_field[idx]] = addr + offset; - pq->coef[idx] = coef; -} - -static bool is_jf_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case 
PCI_DEVICE_ID_INTEL_IOAT_JSF0: - case PCI_DEVICE_ID_INTEL_IOAT_JSF1: - case PCI_DEVICE_ID_INTEL_IOAT_JSF2: - case PCI_DEVICE_ID_INTEL_IOAT_JSF3: - case PCI_DEVICE_ID_INTEL_IOAT_JSF4: - case PCI_DEVICE_ID_INTEL_IOAT_JSF5: - case PCI_DEVICE_ID_INTEL_IOAT_JSF6: - case PCI_DEVICE_ID_INTEL_IOAT_JSF7: - case PCI_DEVICE_ID_INTEL_IOAT_JSF8: - case PCI_DEVICE_ID_INTEL_IOAT_JSF9: - return true; - default: - return false; - } -} - -static bool is_snb_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_SNB0: - case PCI_DEVICE_ID_INTEL_IOAT_SNB1: - case PCI_DEVICE_ID_INTEL_IOAT_SNB2: - case PCI_DEVICE_ID_INTEL_IOAT_SNB3: - case PCI_DEVICE_ID_INTEL_IOAT_SNB4: - case PCI_DEVICE_ID_INTEL_IOAT_SNB5: - case PCI_DEVICE_ID_INTEL_IOAT_SNB6: - case PCI_DEVICE_ID_INTEL_IOAT_SNB7: - case PCI_DEVICE_ID_INTEL_IOAT_SNB8: - case PCI_DEVICE_ID_INTEL_IOAT_SNB9: - return true; - default: - return false; - } -} - -static bool is_ivb_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_IVB0: - case PCI_DEVICE_ID_INTEL_IOAT_IVB1: - case PCI_DEVICE_ID_INTEL_IOAT_IVB2: - case PCI_DEVICE_ID_INTEL_IOAT_IVB3: - case PCI_DEVICE_ID_INTEL_IOAT_IVB4: - case PCI_DEVICE_ID_INTEL_IOAT_IVB5: - case PCI_DEVICE_ID_INTEL_IOAT_IVB6: - case PCI_DEVICE_ID_INTEL_IOAT_IVB7: - case PCI_DEVICE_ID_INTEL_IOAT_IVB8: - case PCI_DEVICE_ID_INTEL_IOAT_IVB9: - return true; - default: - return false; - } - -} - -static bool is_hsw_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_HSW0: - case PCI_DEVICE_ID_INTEL_IOAT_HSW1: - case PCI_DEVICE_ID_INTEL_IOAT_HSW2: - case PCI_DEVICE_ID_INTEL_IOAT_HSW3: - case PCI_DEVICE_ID_INTEL_IOAT_HSW4: - case PCI_DEVICE_ID_INTEL_IOAT_HSW5: - case PCI_DEVICE_ID_INTEL_IOAT_HSW6: - case PCI_DEVICE_ID_INTEL_IOAT_HSW7: - case PCI_DEVICE_ID_INTEL_IOAT_HSW8: - case PCI_DEVICE_ID_INTEL_IOAT_HSW9: - return true; - default: - return false; - } - -} - -static bool is_xeon_cb32(struct pci_dev *pdev) -{ - return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || - is_hsw_ioat(pdev); -} - -static bool is_bwd_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_BWD0: - case PCI_DEVICE_ID_INTEL_IOAT_BWD1: - case PCI_DEVICE_ID_INTEL_IOAT_BWD2: - case PCI_DEVICE_ID_INTEL_IOAT_BWD3: - /* even though not Atom, BDX-DE has same DMA silicon */ - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: - return true; - default: - return false; - } -} - -static bool is_bwd_noraid(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_BWD2: - case PCI_DEVICE_ID_INTEL_IOAT_BWD3: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: - case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: - return true; - default: - return false; - } - -} - -static void pq16_set_src(struct ioat_raw_descriptor *desc[3], - dma_addr_t addr, u32 offset, u8 coef, unsigned idx) -{ - struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; - struct ioat_pq16a_descriptor *pq16 = - (struct ioat_pq16a_descriptor *)desc[1]; - struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; - - raw->field[pq16_idx_to_field[idx]] = addr + offset; - - if (idx < 8) - pq->coef[idx] = coef; - else - pq16->coef[idx - 8] = coef; -} - -static struct ioat_sed_ent * -ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) -{ - struct 
ioat_sed_ent *sed; - gfp_t flags = __GFP_ZERO | GFP_ATOMIC; - - sed = kmem_cache_alloc(ioat3_sed_cache, flags); - if (!sed) - return NULL; - - sed->hw_pool = hw_pool; - sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], - flags, &sed->dma); - if (!sed->hw) { - kmem_cache_free(ioat3_sed_cache, sed); - return NULL; - } - - return sed; -} - -static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed) -{ - if (!sed) - return; - - dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); - kmem_cache_free(ioat3_sed_cache, sed); -} - -static bool desc_has_ext(struct ioat_ring_ent *desc) -{ - struct ioat_dma_descriptor *hw = desc->hw; - - if (hw->ctl_f.op == IOAT_OP_XOR || - hw->ctl_f.op == IOAT_OP_XOR_VAL) { - struct ioat_xor_descriptor *xor = desc->xor; - - if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) - return true; - } else if (hw->ctl_f.op == IOAT_OP_PQ || - hw->ctl_f.op == IOAT_OP_PQ_VAL) { - struct ioat_pq_descriptor *pq = desc->pq; - - if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) - return true; - } - - return false; -} - -static u64 ioat3_get_current_completion(struct ioat_chan_common *chan) -{ - u64 phys_complete; - u64 completion; - - completion = *chan->completion; - phys_complete = ioat_chansts_to_addr(completion); - - dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, - (unsigned long long) phys_complete); - - return phys_complete; -} - -static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan, - u64 *phys_complete) -{ - *phys_complete = ioat3_get_current_completion(chan); - if (*phys_complete == chan->last_completion) - return false; - - clear_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - - return true; -} - -static void -desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc) -{ - struct ioat_dma_descriptor *hw = desc->hw; - - switch (hw->ctl_f.op) { - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ_VAL_16S: - { - struct ioat_pq_descriptor *pq = desc->pq; - - /* check if there's error written */ - if (!pq->dwbes_f.wbes) - return; - - /* need to set a chanerr var for checking to clear later */ - - if (pq->dwbes_f.p_val_err) - *desc->result |= SUM_CHECK_P_RESULT; - - if (pq->dwbes_f.q_val_err) - *desc->result |= SUM_CHECK_Q_RESULT; - - return; - } - default: - return; - } -} - -/** - * __cleanup - reclaim used descriptors - * @ioat: channel (ring) to clean - * - * The difference from the dma_v2.c __cleanup() is that this routine - * handles extended descriptors and dma-unmapping raid operations. - */ -static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) -{ - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *device = chan->device; - struct ioat_ring_ent *desc; - bool seen_current = false; - int idx = ioat->tail, i; - u16 active; - - dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued); - - /* - * At restart of the channel, the completion address and the - * channel status will be 0 due to starting a new chain. Since - * it's new chain and the first descriptor "fails", there is - * nothing to clean up. We do not want to reap the entire submitted - * chain due to this 0 address value and then BUG. 
- */ - if (!phys_complete) - return; - - active = ioat2_ring_active(ioat); - for (i = 0; i < active && !seen_current; i++) { - struct dma_async_tx_descriptor *tx; - - smp_read_barrier_depends(); - prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); - desc = ioat2_get_ring_ent(ioat, idx + i); - dump_desc_dbg(ioat, desc); - - /* set err stat if we are using dwbes */ - if (device->cap & IOAT_CAP_DWBES) - desc_get_errstat(ioat, desc); - - tx = &desc->txd; - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - - if (tx->phys == phys_complete) - seen_current = true; - - /* skip extended descriptors */ - if (desc_has_ext(desc)) { - BUG_ON(i + 1 >= active); - i++; - } - - /* cleanup super extended descriptors */ - if (desc->sed) { - ioat3_free_sed(device, desc->sed); - desc->sed = NULL; - } - } - smp_mb(); /* finish all descriptor reads before incrementing tail */ - ioat->tail = idx + i; - BUG_ON(active && !seen_current); /* no active descs have written a completion? */ - chan->last_completion = phys_complete; - - if (active - i == 0) { - dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", - __func__); - clear_bit(IOAT_COMPLETION_PENDING, &chan->state); - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - } - /* 5 microsecond delay per pending descriptor */ - writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), - chan->device->reg_base + IOAT_INTRDELAY_OFFSET); -} - -static void ioat3_cleanup(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - u64 phys_complete; - - spin_lock_bh(&chan->cleanup_lock); - - if (ioat3_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - - if (is_ioat_halted(*chan->completion)) { - u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - - if (chanerr & IOAT_CHANERR_HANDLE_MASK) { - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - ioat3_eh(ioat); - } - } - - spin_unlock_bh(&chan->cleanup_lock); -} - -static void ioat3_cleanup_event(unsigned long data) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - - ioat3_cleanup(ioat); - if (!test_bit(IOAT_RUN, &chan->state)) - return; - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); -} - -static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - u64 phys_complete; - - ioat2_quiesce(chan, 0); - if (ioat3_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - - __ioat2_restart_chan(ioat); -} - -static void ioat3_eh(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - struct pci_dev *pdev = to_pdev(chan); - struct ioat_dma_descriptor *hw; - struct dma_async_tx_descriptor *tx; - u64 phys_complete; - struct ioat_ring_ent *desc; - u32 err_handled = 0; - u32 chanerr_int; - u32 chanerr; - - /* cleanup so tail points to descriptor that caused the error */ - if (ioat3_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); - - dev_dbg(to_dev(chan), "%s: error = %x:%x\n", - __func__, chanerr, chanerr_int); - - desc = ioat2_get_ring_ent(ioat, ioat->tail); - hw = desc->hw; - dump_desc_dbg(ioat, desc); - - switch (hw->ctl_f.op) { - case IOAT_OP_XOR_VAL: - if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { - *desc->result |= 
SUM_CHECK_P_RESULT; - err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; - } - break; - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ_VAL_16S: - if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { - *desc->result |= SUM_CHECK_P_RESULT; - err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; - } - if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { - *desc->result |= SUM_CHECK_Q_RESULT; - err_handled |= IOAT_CHANERR_XOR_Q_ERR; - } - break; - } - - /* fault on unhandled error or spurious halt */ - if (chanerr ^ err_handled || chanerr == 0) { - dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", - __func__, chanerr, err_handled); - BUG(); - } else { /* cleanup the faulty descriptor */ - tx = &desc->txd; - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - } - - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); - - /* mark faulting descriptor as complete */ - *chan->completion = desc->txd.phys; - - spin_lock_bh(&ioat->prep_lock); - ioat3_restart_channel(ioat); - spin_unlock_bh(&ioat->prep_lock); -} - -static void check_active(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - - if (ioat2_ring_active(ioat)) { - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - return; - } - - if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state)) - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - else if (ioat->alloc_order > ioat_get_alloc_order()) { - /* if the ring is idle, empty, and oversized try to step - * down the size - */ - reshape_ring(ioat, ioat->alloc_order - 1); - - /* keep shrinking until we get back to our minimum - * default size - */ - if (ioat->alloc_order > ioat_get_alloc_order()) - mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); - } - -} - -static void ioat3_timer_event(unsigned long data) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; - u64 status; - - status = ioat_chansts(chan); - - /* when halted due to errors check for channel - * programming errors before advancing the completion state - */ - if (is_ioat_halted(status)) { - u32 chanerr; - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - dev_err(to_dev(chan), "%s: Channel halted (%x)\n", - __func__, chanerr); - if (test_bit(IOAT_RUN, &chan->state)) - BUG_ON(is_ioat_bug(chanerr)); - else /* we never got off the ground */ - return; - } - - /* if we haven't made progress and we have already - * acknowledged a pending completion once, then be more - * forceful with a restart - */ - spin_lock_bh(&chan->cleanup_lock); - if (ioat_cleanup_preamble(chan, &phys_complete)) - __cleanup(ioat, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { - spin_lock_bh(&ioat->prep_lock); - ioat3_restart_channel(ioat); - spin_unlock_bh(&ioat->prep_lock); - spin_unlock_bh(&chan->cleanup_lock); - return; - } else { - set_bit(IOAT_COMPLETION_ACK, &chan->state); - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - } - - - if (ioat2_ring_active(ioat)) - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - else { - spin_lock_bh(&ioat->prep_lock); - check_active(ioat); - spin_unlock_bh(&ioat->prep_lock); - } - spin_unlock_bh(&chan->cleanup_lock); -} - -static enum dma_status -ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - enum dma_status ret; - - 
ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - - ioat3_cleanup(ioat); - - return dma_cookie_status(c, cookie, txstate); -} - -static struct dma_async_tx_descriptor * -__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, - dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, - size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_ring_ent *compl_desc; - struct ioat_ring_ent *desc; - struct ioat_ring_ent *ext; - size_t total_len = len; - struct ioat_xor_descriptor *xor; - struct ioat_xor_ext_descriptor *xor_ex = NULL; - struct ioat_dma_descriptor *hw; - int num_descs, with_ext, idx, i; - u32 offset = 0; - u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; - - BUG_ON(src_cnt < 2); - - num_descs = ioat2_xferlen_to_descs(ioat, len); - /* we need 2x the number of descriptors to cover greater than 5 - * sources - */ - if (src_cnt > 5) { - with_ext = 1; - num_descs *= 2; - } else - with_ext = 0; - - /* completion writes from the raid engine may pass completion - * writes from the legacy engine, so we need one extra null - * (legacy) descriptor to ensure all completion writes arrive in - * order. - */ - if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0) - idx = ioat->head; - else - return NULL; - i = 0; - do { - struct ioat_raw_descriptor *descs[2]; - size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); - int s; - - desc = ioat2_get_ring_ent(ioat, idx + i); - xor = desc->xor; - - /* save a branch by unconditionally retrieving the - * extended descriptor xor_set_src() knows to not write - * to it in the single descriptor case - */ - ext = ioat2_get_ring_ent(ioat, idx + i + 1); - xor_ex = ext->xor_ex; - - descs[0] = (struct ioat_raw_descriptor *) xor; - descs[1] = (struct ioat_raw_descriptor *) xor_ex; - for (s = 0; s < src_cnt; s++) - xor_set_src(descs, src[s], offset, s); - xor->size = xfer_size; - xor->dst_addr = dest + offset; - xor->ctl = 0; - xor->ctl_f.op = op; - xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); - - len -= xfer_size; - offset += xfer_size; - dump_desc_dbg(ioat, desc); - } while ((i += 1 + with_ext) < num_descs); - - /* last xor descriptor carries the unmap parameters and fence bit */ - desc->txd.flags = flags; - desc->len = total_len; - if (result) - desc->result = result; - xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - - /* completion descriptor carries interrupt bit */ - compl_desc = ioat2_get_ring_ent(ioat, idx + i); - compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; - hw = compl_desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.compl_write = 1; - hw->size = NULL_DESC_BUFFER_SIZE; - dump_desc_dbg(ioat, compl_desc); - - /* we leave the channel locked to ensure in order submission */ - return &compl_desc->txd; -} - -static struct dma_async_tx_descriptor * -ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, - unsigned int src_cnt, size_t len, unsigned long flags) -{ - return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); -} - -static struct dma_async_tx_descriptor * -ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, - unsigned int src_cnt, size_t len, - enum sum_check_flags *result, unsigned long flags) -{ - /* the cleanup routine only sets bits on validate failure, it - * does not clear bits on validate success... 
so clear it here - */ - *result = 0; - - return __ioat3_prep_xor_lock(chan, result, src[0], &src[1], - src_cnt - 1, len, flags); -} - -static void -dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext) -{ - struct device *dev = to_dev(&ioat->base); - struct ioat_pq_descriptor *pq = desc->pq; - struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; - struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; - int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); - int i; - - dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" - " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" - " src_cnt: %d)\n", - desc_id(desc), (unsigned long long) desc->txd.phys, - (unsigned long long) (pq_ex ? pq_ex->next : pq->next), - desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, - pq->ctl_f.compl_write, - pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", - pq->ctl_f.src_cnt); - for (i = 0; i < src_cnt; i++) - dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, - (unsigned long long) pq_get_src(descs, i), pq->coef[i]); - dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); - dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); - dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); -} - -static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat, - struct ioat_ring_ent *desc) -{ - struct device *dev = to_dev(&ioat->base); - struct ioat_pq_descriptor *pq = desc->pq; - struct ioat_raw_descriptor *descs[] = { (void *)pq, - (void *)pq, - (void *)pq }; - int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); - int i; - - if (desc->sed) { - descs[1] = (void *)desc->sed->hw; - descs[2] = (void *)desc->sed->hw + 64; - } - - dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" - " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" - " src_cnt: %d)\n", - desc_id(desc), (unsigned long long) desc->txd.phys, - (unsigned long long) pq->next, - desc->txd.flags, pq->size, pq->ctl, - pq->ctl_f.op, pq->ctl_f.int_en, - pq->ctl_f.compl_write, - pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", - pq->ctl_f.src_cnt); - for (i = 0; i < src_cnt; i++) { - dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, - (unsigned long long) pq16_get_src(descs, i), - pq->coef[i]); - } - dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); - dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); -} - -static struct dma_async_tx_descriptor * -__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, - const dma_addr_t *dst, const dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, - size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *device = chan->device; - struct ioat_ring_ent *compl_desc; - struct ioat_ring_ent *desc; - struct ioat_ring_ent *ext; - size_t total_len = len; - struct ioat_pq_descriptor *pq; - struct ioat_pq_ext_descriptor *pq_ex = NULL; - struct ioat_dma_descriptor *hw; - u32 offset = 0; - u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; - int i, s, idx, with_ext, num_descs; - int cb32 = (device->version < IOAT_VER_3_3) ? 
1 : 0; - - dev_dbg(to_dev(chan), "%s\n", __func__); - /* the engine requires at least two sources (we provide - * at least 1 implied source in the DMA_PREP_CONTINUE case) - */ - BUG_ON(src_cnt + dmaf_continue(flags) < 2); - - num_descs = ioat2_xferlen_to_descs(ioat, len); - /* we need 2x the number of descriptors to cover greater than 3 - * sources (we need 1 extra source in the q-only continuation - * case and 3 extra sources in the p+q continuation case. - */ - if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || - (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { - with_ext = 1; - num_descs *= 2; - } else - with_ext = 0; - - /* completion writes from the raid engine may pass completion - * writes from the legacy engine, so we need one extra null - * (legacy) descriptor to ensure all completion writes arrive in - * order. - */ - if (likely(num_descs) && - ioat2_check_space_lock(ioat, num_descs + cb32) == 0) - idx = ioat->head; - else - return NULL; - i = 0; - do { - struct ioat_raw_descriptor *descs[2]; - size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); - - desc = ioat2_get_ring_ent(ioat, idx + i); - pq = desc->pq; - - /* save a branch by unconditionally retrieving the - * extended descriptor pq_set_src() knows to not write - * to it in the single descriptor case - */ - ext = ioat2_get_ring_ent(ioat, idx + i + with_ext); - pq_ex = ext->pq_ex; - - descs[0] = (struct ioat_raw_descriptor *) pq; - descs[1] = (struct ioat_raw_descriptor *) pq_ex; - - for (s = 0; s < src_cnt; s++) - pq_set_src(descs, src[s], offset, scf[s], s); - - /* see the comment for dma_maxpq in include/linux/dmaengine.h */ - if (dmaf_p_disabled_continue(flags)) - pq_set_src(descs, dst[1], offset, 1, s++); - else if (dmaf_continue(flags)) { - pq_set_src(descs, dst[0], offset, 0, s++); - pq_set_src(descs, dst[1], offset, 1, s++); - pq_set_src(descs, dst[1], offset, 0, s++); - } - pq->size = xfer_size; - pq->p_addr = dst[0] + offset; - pq->q_addr = dst[1] + offset; - pq->ctl = 0; - pq->ctl_f.op = op; - /* we turn on descriptor write back error status */ - if (device->cap & IOAT_CAP_DWBES) - pq->ctl_f.wb_en = result ? 
1 : 0; - pq->ctl_f.src_cnt = src_cnt_to_hw(s); - pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); - pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); - - len -= xfer_size; - offset += xfer_size; - } while ((i += 1 + with_ext) < num_descs); - - /* last pq descriptor carries the unmap parameters and fence bit */ - desc->txd.flags = flags; - desc->len = total_len; - if (result) - desc->result = result; - pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - dump_pq_desc_dbg(ioat, desc, ext); - - if (!cb32) { - pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - pq->ctl_f.compl_write = 1; - compl_desc = desc; - } else { - /* completion descriptor carries interrupt bit */ - compl_desc = ioat2_get_ring_ent(ioat, idx + i); - compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; - hw = compl_desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.compl_write = 1; - hw->size = NULL_DESC_BUFFER_SIZE; - dump_desc_dbg(ioat, compl_desc); - } - - - /* we leave the channel locked to ensure in order submission */ - return &compl_desc->txd; -} - -static struct dma_async_tx_descriptor * -__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, - const dma_addr_t *dst, const dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, - size_t len, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *device = chan->device; - struct ioat_ring_ent *desc; - size_t total_len = len; - struct ioat_pq_descriptor *pq; - u32 offset = 0; - u8 op; - int i, s, idx, num_descs; - - /* this function is only called with 9-16 sources */ - op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; - - dev_dbg(to_dev(chan), "%s\n", __func__); - - num_descs = ioat2_xferlen_to_descs(ioat, len); - - /* - * 16 source pq is only available on cb3.3 and has no completion - * write hw bug. - */ - if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0) - idx = ioat->head; - else - return NULL; - - i = 0; - - do { - struct ioat_raw_descriptor *descs[4]; - size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); - - desc = ioat2_get_ring_ent(ioat, idx + i); - pq = desc->pq; - - descs[0] = (struct ioat_raw_descriptor *) pq; - - desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); - if (!desc->sed) { - dev_err(to_dev(chan), - "%s: no free sed entries\n", __func__); - return NULL; - } - - pq->sed_addr = desc->sed->dma; - desc->sed->parent = desc; - - descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; - descs[2] = (void *)descs[1] + 64; - - for (s = 0; s < src_cnt; s++) - pq16_set_src(descs, src[s], offset, scf[s], s); - - /* see the comment for dma_maxpq in include/linux/dmaengine.h */ - if (dmaf_p_disabled_continue(flags)) - pq16_set_src(descs, dst[1], offset, 1, s++); - else if (dmaf_continue(flags)) { - pq16_set_src(descs, dst[0], offset, 0, s++); - pq16_set_src(descs, dst[1], offset, 1, s++); - pq16_set_src(descs, dst[1], offset, 0, s++); - } - - pq->size = xfer_size; - pq->p_addr = dst[0] + offset; - pq->q_addr = dst[1] + offset; - pq->ctl = 0; - pq->ctl_f.op = op; - pq->ctl_f.src_cnt = src16_cnt_to_hw(s); - /* we turn on descriptor write back error status */ - if (device->cap & IOAT_CAP_DWBES) - pq->ctl_f.wb_en = result ? 
1 : 0; - pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); - pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); - - len -= xfer_size; - offset += xfer_size; - } while (++i < num_descs); - - /* last pq descriptor carries the unmap parameters and fence bit */ - desc->txd.flags = flags; - desc->len = total_len; - if (result) - desc->result = result; - pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - - /* with cb3.3 we should be able to do completion w/o a null desc */ - pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - pq->ctl_f.compl_write = 1; - - dump_pq16_desc_dbg(ioat, desc); - - /* we leave the channel locked to ensure in order submission */ - return &desc->txd; -} - -static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) -{ - if (dmaf_p_disabled_continue(flags)) - return src_cnt + 1; - else if (dmaf_continue(flags)) - return src_cnt + 3; - else - return src_cnt; -} - -static struct dma_async_tx_descriptor * -ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, size_t len, - unsigned long flags) -{ - /* specify valid address for disabled result */ - if (flags & DMA_PREP_PQ_DISABLE_P) - dst[0] = dst[1]; - if (flags & DMA_PREP_PQ_DISABLE_Q) - dst[1] = dst[0]; - - /* handle the single source multiply case from the raid6 - * recovery path - */ - if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { - dma_addr_t single_source[2]; - unsigned char single_source_coef[2]; - - BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); - single_source[0] = src[0]; - single_source[1] = src[0]; - single_source_coef[0] = scf[0]; - single_source_coef[1] = 0; - - return src_cnt_flags(src_cnt, flags) > 8 ? - __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, - 2, single_source_coef, len, - flags) : - __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, - single_source_coef, len, flags); - - } else { - return src_cnt_flags(src_cnt, flags) > 8 ? - __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, - scf, len, flags) : - __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, - scf, len, flags); - } -} - -static struct dma_async_tx_descriptor * -ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, - unsigned int src_cnt, const unsigned char *scf, size_t len, - enum sum_check_flags *pqres, unsigned long flags) -{ - /* specify valid address for disabled result */ - if (flags & DMA_PREP_PQ_DISABLE_P) - pq[0] = pq[1]; - if (flags & DMA_PREP_PQ_DISABLE_Q) - pq[1] = pq[0]; - - /* the cleanup routine only sets bits on validate failure, it - * does not clear bits on validate success... so clear it here - */ - *pqres = 0; - - return src_cnt_flags(src_cnt, flags) > 8 ? - __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, - flags) : - __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, - flags); -} - -static struct dma_async_tx_descriptor * -ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, - unsigned int src_cnt, size_t len, unsigned long flags) -{ - unsigned char scf[src_cnt]; - dma_addr_t pq[2]; - - memset(scf, 0, src_cnt); - pq[0] = dst; - flags |= DMA_PREP_PQ_DISABLE_Q; - pq[1] = dst; /* specify valid address for disabled result */ - - return src_cnt_flags(src_cnt, flags) > 8 ? 
- __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, - flags) : - __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, - flags); -} - -static struct dma_async_tx_descriptor * -ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, - unsigned int src_cnt, size_t len, - enum sum_check_flags *result, unsigned long flags) -{ - unsigned char scf[src_cnt]; - dma_addr_t pq[2]; - - /* the cleanup routine only sets bits on validate failure, it - * does not clear bits on validate success... so clear it here - */ - *result = 0; - - memset(scf, 0, src_cnt); - pq[0] = src[0]; - flags |= DMA_PREP_PQ_DISABLE_Q; - pq[1] = pq[0]; /* specify valid address for disabled result */ - - return src_cnt_flags(src_cnt, flags) > 8 ? - __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, - scf, len, flags) : - __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, - scf, len, flags); -} - -static struct dma_async_tx_descriptor * -ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioat_ring_ent *desc; - struct ioat_dma_descriptor *hw; - - if (ioat2_check_space_lock(ioat, 1) == 0) - desc = ioat2_get_ring_ent(ioat, ioat->head); - else - return NULL; - - hw = desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = 1; - hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); - hw->ctl_f.compl_write = 1; - hw->size = NULL_DESC_BUFFER_SIZE; - hw->src_addr = 0; - hw->dst_addr = 0; - - desc->txd.flags = flags; - desc->len = 1; - - dump_desc_dbg(ioat, desc); - - /* we leave the channel locked to ensure in order submission */ - return &desc->txd; -} - -static void ioat3_dma_test_callback(void *dma_async_param) -{ - struct completion *cmp = dma_async_param; - - complete(cmp); -} - -#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ -static int ioat_xor_val_self_test(struct ioatdma_device *device) -{ - int i, src_idx; - struct page *dest; - struct page *xor_srcs[IOAT_NUM_SRC_TEST]; - struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; - dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; - dma_addr_t dest_dma; - struct dma_async_tx_descriptor *tx; - struct dma_chan *dma_chan; - dma_cookie_t cookie; - u8 cmp_byte = 0; - u32 cmp_word; - u32 xor_val_result; - int err = 0; - struct completion cmp; - unsigned long tmo; - struct device *dev = &device->pdev->dev; - struct dma_device *dma = &device->common; - u8 op = 0; - - dev_dbg(dev, "%s\n", __func__); - - if (!dma_has_cap(DMA_XOR, dma->cap_mask)) - return 0; - - for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { - xor_srcs[src_idx] = alloc_page(GFP_KERNEL); - if (!xor_srcs[src_idx]) { - while (src_idx--) - __free_page(xor_srcs[src_idx]); - return -ENOMEM; - } - } - - dest = alloc_page(GFP_KERNEL); - if (!dest) { - while (src_idx--) - __free_page(xor_srcs[src_idx]); - return -ENOMEM; - } - - /* Fill in src buffers */ - for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { - u8 *ptr = page_address(xor_srcs[src_idx]); - for (i = 0; i < PAGE_SIZE; i++) - ptr[i] = (1 << src_idx); - } - - for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) - cmp_byte ^= (u8) (1 << src_idx); - - cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | - (cmp_byte << 8) | cmp_byte; - - memset(page_address(dest), 0, PAGE_SIZE); - - dma_chan = container_of(dma->channels.next, struct dma_chan, - device_node); - if (dma->device_alloc_chan_resources(dma_chan) < 1) { - err = -ENODEV; - goto out; - } - - /* test xor */ - op = IOAT_OP_XOR; - - dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, 
DMA_FROM_DEVICE); - if (dma_mapping_error(dev, dest_dma)) - goto dma_unmap; - - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - dma_srcs[i] = DMA_ERROR_CODE; - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { - dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_srcs[i])) - goto dma_unmap; - } - tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, - IOAT_NUM_SRC_TEST, PAGE_SIZE, - DMA_PREP_INTERRUPT); - - if (!tx) { - dev_err(dev, "Self-test xor prep failed\n"); - err = -ENODEV; - goto dma_unmap; - } - - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test xor setup failed\n"); - err = -ENODEV; - goto dma_unmap; - } - dma->device_issue_pending(dma_chan); - - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - - if (tmo == 0 || - dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { - dev_err(dev, "Self-test xor timed out\n"); - err = -ENODEV; - goto dma_unmap; - } - - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); - - dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); - for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { - u32 *ptr = page_address(dest); - if (ptr[i] != cmp_word) { - dev_err(dev, "Self-test xor failed compare\n"); - err = -ENODEV; - goto free_resources; - } - } - dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); - - dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); - - /* skip validate if the capability is not present */ - if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) - goto free_resources; - - op = IOAT_OP_XOR_VAL; - - /* validate the sources with the destination page */ - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - xor_val_srcs[i] = xor_srcs[i]; - xor_val_srcs[i] = dest; - - xor_val_result = 1; - - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_srcs[i] = DMA_ERROR_CODE; - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { - dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_srcs[i])) - goto dma_unmap; - } - tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, - IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT); - if (!tx) { - dev_err(dev, "Self-test zero prep failed\n"); - err = -ENODEV; - goto dma_unmap; - } - - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test zero setup failed\n"); - err = -ENODEV; - goto dma_unmap; - } - dma->device_issue_pending(dma_chan); - - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - - if (tmo == 0 || - dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { - dev_err(dev, "Self-test validate timed out\n"); - err = -ENODEV; - goto dma_unmap; - } - - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); - - if (xor_val_result != 0) { - dev_err(dev, "Self-test validate failed compare\n"); - err = -ENODEV; - goto free_resources; - } - - memset(page_address(dest), 0, PAGE_SIZE); - - /* test for non-zero parity sum */ - op = IOAT_OP_XOR_VAL; - - xor_val_result = 0; - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_srcs[i] = DMA_ERROR_CODE; - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { - dma_srcs[i] = dma_map_page(dev, 
xor_val_srcs[i], 0, PAGE_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_srcs[i])) - goto dma_unmap; - } - tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, - IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT); - if (!tx) { - dev_err(dev, "Self-test 2nd zero prep failed\n"); - err = -ENODEV; - goto dma_unmap; - } - - async_tx_ack(tx); - init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; - tx->callback_param = &cmp; - cookie = tx->tx_submit(tx); - if (cookie < 0) { - dev_err(dev, "Self-test 2nd zero setup failed\n"); - err = -ENODEV; - goto dma_unmap; - } - dma->device_issue_pending(dma_chan); - - tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - - if (tmo == 0 || - dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { - dev_err(dev, "Self-test 2nd validate timed out\n"); - err = -ENODEV; - goto dma_unmap; - } - - if (xor_val_result != SUM_CHECK_P_RESULT) { - dev_err(dev, "Self-test validate failed compare\n"); - err = -ENODEV; - goto dma_unmap; - } - - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); - - goto free_resources; -dma_unmap: - if (op == IOAT_OP_XOR) { - if (dest_dma != DMA_ERROR_CODE) - dma_unmap_page(dev, dest_dma, PAGE_SIZE, - DMA_FROM_DEVICE); - for (i = 0; i < IOAT_NUM_SRC_TEST; i++) - if (dma_srcs[i] != DMA_ERROR_CODE) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, - DMA_TO_DEVICE); - } else if (op == IOAT_OP_XOR_VAL) { - for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) - if (dma_srcs[i] != DMA_ERROR_CODE) - dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, - DMA_TO_DEVICE); - } -free_resources: - dma->device_free_chan_resources(dma_chan); -out: - src_idx = IOAT_NUM_SRC_TEST; - while (src_idx--) - __free_page(xor_srcs[src_idx]); - __free_page(dest); - return err; -} - -static int ioat3_dma_self_test(struct ioatdma_device *device) -{ - int rc = ioat_dma_self_test(device); - - if (rc) - return rc; - - rc = ioat_xor_val_self_test(device); - if (rc) - return rc; - - return 0; -} - -static int ioat3_irq_reinit(struct ioatdma_device *device) -{ - struct pci_dev *pdev = device->pdev; - int irq = pdev->irq, i; - - if (!is_bwd_ioat(pdev)) - return 0; - - switch (device->irq_mode) { - case IOAT_MSIX: - for (i = 0; i < device->common.chancnt; i++) { - struct msix_entry *msix = &device->msix_entries[i]; - struct ioat_chan_common *chan; - - chan = ioat_chan_by_index(device, i); - devm_free_irq(&pdev->dev, msix->vector, chan); - } - - pci_disable_msix(pdev); - break; - case IOAT_MSI: - pci_disable_msi(pdev); - /* fall through */ - case IOAT_INTX: - devm_free_irq(&pdev->dev, irq, device); - break; - default: - return 0; - } - device->irq_mode = IOAT_NOIRQ; - - return ioat_dma_setup_interrupts(device); -} - -static int ioat3_reset_hw(struct ioat_chan_common *chan) -{ - /* throw away whatever the channel was doing and get it - * initialized, with ioat3 specific workarounds - */ - struct ioatdma_device *device = chan->device; - struct pci_dev *pdev = device->pdev; - u32 chanerr; - u16 dev_id; - int err; - - ioat2_quiesce(chan, msecs_to_jiffies(100)); - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - - if (device->version < IOAT_VER_3_3) { - /* clear any pending errors */ - err = pci_read_config_dword(pdev, - IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); - if (err) { - dev_err(&pdev->dev, - "channel error register unreachable\n"); - return err; - } - pci_write_config_dword(pdev, - IOAT_PCI_CHANERR_INT_OFFSET, 
chanerr); - - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { - pci_write_config_dword(pdev, - IOAT_PCI_DMAUNCERRSTS_OFFSET, - 0x10); - } - } - - err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); - if (!err) - err = ioat3_irq_reinit(device); - - if (err) - dev_err(&pdev->dev, "Failed to reset: %d\n", err); - - return err; -} - -static void ioat3_intr_quirk(struct ioatdma_device *device) -{ - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - u32 errmask; - - dma = &device->common; - - /* - * if we have descriptor write back error status, we mask the - * error interrupts - */ - if (device->cap & IOAT_CAP_DWBES) { - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - errmask = readl(chan->reg_base + - IOAT_CHANERR_MASK_OFFSET); - errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | - IOAT_CHANERR_XOR_Q_ERR; - writel(errmask, chan->reg_base + - IOAT_CHANERR_MASK_OFFSET); - } - } -} - -int ioat3_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - int dca_en = system_has_dca_enabled(pdev); - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - bool is_raid_device = false; - int err; - - device->enumerate_channels = ioat2_enumerate_channels; - device->reset_hw = ioat3_reset_hw; - device->self_test = ioat3_dma_self_test; - device->intr_quirk = ioat3_intr_quirk; - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; - dma->device_issue_pending = ioat2_issue_pending; - dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; - dma->device_free_chan_resources = ioat2_free_chan_resources; - - dma_cap_set(DMA_INTERRUPT, dma->cap_mask); - dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; - - device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); - - if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) - device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); - - /* dca is incompatible with raid operations */ - if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) - device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); - - if (device->cap & IOAT_CAP_XOR) { - is_raid_device = true; - dma->max_xor = 8; - - dma_cap_set(DMA_XOR, dma->cap_mask); - dma->device_prep_dma_xor = ioat3_prep_xor; - - dma_cap_set(DMA_XOR_VAL, dma->cap_mask); - dma->device_prep_dma_xor_val = ioat3_prep_xor_val; - } - - if (device->cap & IOAT_CAP_PQ) { - is_raid_device = true; - - dma->device_prep_dma_pq = ioat3_prep_pq; - dma->device_prep_dma_pq_val = ioat3_prep_pq_val; - dma_cap_set(DMA_PQ, dma->cap_mask); - dma_cap_set(DMA_PQ_VAL, dma->cap_mask); - - if (device->cap & IOAT_CAP_RAID16SS) { - dma_set_maxpq(dma, 16, 0); - } else { - dma_set_maxpq(dma, 8, 0); - } - - if (!(device->cap & IOAT_CAP_XOR)) { - dma->device_prep_dma_xor = ioat3_prep_pqxor; - dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; - dma_cap_set(DMA_XOR, dma->cap_mask); - dma_cap_set(DMA_XOR_VAL, dma->cap_mask); - - if (device->cap & IOAT_CAP_RAID16SS) { - dma->max_xor = 16; - } else { - dma->max_xor = 8; - } - } - } - - dma->device_tx_status = ioat3_tx_status; - device->cleanup_fn = ioat3_cleanup_event; - device->timer_fn = ioat3_timer_event; - - /* starting with CB3.3 super extended descriptors are supported */ - if (device->cap & IOAT_CAP_RAID16SS) { - char pool_name[14]; - int i; - - for (i = 0; i < 
MAX_SED_POOLS; i++) { - snprintf(pool_name, 14, "ioat_hw%d_sed", i); - - /* allocate SED DMA pool */ - device->sed_hw_pool[i] = dmam_pool_create(pool_name, - &pdev->dev, - SED_SIZE * (i + 1), 64, 0); - if (!device->sed_hw_pool[i]) - return -ENOMEM; - - } - } - - err = ioat_probe(device); - if (err) - return err; - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - - ioat_kobject_add(device, &ioat2_ktype); - - if (dca) - device->dca = ioat3_dca_init(pdev, device->reg_base); - - return 0; -} diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index a3e731edc..690e3b4f8 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -21,11 +21,6 @@ #define IOAT_MMIO_BAR 0 /* CB device ID's */ -#define IOAT_PCI_DID_5000 0x1A38 -#define IOAT_PCI_DID_CNB 0x360B -#define IOAT_PCI_DID_SCNB 0x65FF -#define IOAT_PCI_DID_SNB 0x402F - #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 @@ -58,6 +53,17 @@ #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2 0x6f52 #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3 0x6f53 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX0 0x6f20 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX1 0x6f21 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX2 0x6f22 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX3 0x6f23 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX4 0x6f24 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX5 0x6f25 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX6 0x6f26 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX7 0x6f27 +#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e +#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f + #define IOAT_VER_1_2 0x12 /* Version 1.2 */ #define IOAT_VER_2_0 0x20 /* Version 2.0 */ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c new file mode 100644 index 000000000..1c3c9b0ab --- /dev/null +++ b/drivers/dma/ioat/init.c @@ -0,0 +1,1314 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2004 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/dmaengine.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/prefetch.h> +#include <linux/dca.h> +#include "dma.h" +#include "registers.h" +#include "hw.h" + +#include "../dmaengine.h" + +MODULE_VERSION(IOAT_DMA_VERSION); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corporation"); + +static struct pci_device_id ioat_pci_tbl[] = { + /* I/OAT v3 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, + + /* I/OAT v3.2 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) }, + { PCI_VDEVICE(INTEL, 
PCI_DEVICE_ID_INTEL_IOAT_BDX3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) }, + + /* I/OAT v3.3 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, + + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); + +static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +static void ioat_remove(struct pci_dev *pdev); +static void +ioat_init_channel(struct ioatdma_device *ioat_dma, + struct ioatdma_chan *ioat_chan, int idx); +static void ioat_intr_quirk(struct ioatdma_device *ioat_dma); +static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma); +static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma); + +static int ioat_dca_enabled = 1; +module_param(ioat_dca_enabled, int, 0644); +MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); +int ioat_pending_level = 4; +module_param(ioat_pending_level, int, 0644); +MODULE_PARM_DESC(ioat_pending_level, + "high-water mark for pushing ioat descriptors (default: 4)"); +int ioat_ring_alloc_order = 8; +module_param(ioat_ring_alloc_order, int, 0644); +MODULE_PARM_DESC(ioat_ring_alloc_order, + "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)"); +int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; +module_param(ioat_ring_max_alloc_order, int, 0644); +MODULE_PARM_DESC(ioat_ring_max_alloc_order, + "ioat+: upper limit for ring size (default: 16)"); +static char ioat_interrupt_style[32] = "msix"; +module_param_string(ioat_interrupt_style, ioat_interrupt_style, + sizeof(ioat_interrupt_style), 0644); +MODULE_PARM_DESC(ioat_interrupt_style, + "set ioat interrupt style: msix (default), msi, intx"); + +struct kmem_cache *ioat_cache; +struct kmem_cache *ioat_sed_cache; + +static bool is_jf_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_JSF0: + case PCI_DEVICE_ID_INTEL_IOAT_JSF1: + case PCI_DEVICE_ID_INTEL_IOAT_JSF2: + case PCI_DEVICE_ID_INTEL_IOAT_JSF3: + case PCI_DEVICE_ID_INTEL_IOAT_JSF4: + case PCI_DEVICE_ID_INTEL_IOAT_JSF5: + case PCI_DEVICE_ID_INTEL_IOAT_JSF6: + case PCI_DEVICE_ID_INTEL_IOAT_JSF7: + case PCI_DEVICE_ID_INTEL_IOAT_JSF8: + case PCI_DEVICE_ID_INTEL_IOAT_JSF9: + return true; + default: + return false; + } +} + +static bool is_snb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_SNB0: + case PCI_DEVICE_ID_INTEL_IOAT_SNB1: + case PCI_DEVICE_ID_INTEL_IOAT_SNB2: + case PCI_DEVICE_ID_INTEL_IOAT_SNB3: + case PCI_DEVICE_ID_INTEL_IOAT_SNB4: + case PCI_DEVICE_ID_INTEL_IOAT_SNB5: + case PCI_DEVICE_ID_INTEL_IOAT_SNB6: + case PCI_DEVICE_ID_INTEL_IOAT_SNB7: + case PCI_DEVICE_ID_INTEL_IOAT_SNB8: + case PCI_DEVICE_ID_INTEL_IOAT_SNB9: + return true; + default: + return false; + } +} + +static bool is_ivb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_IVB0: + 
case PCI_DEVICE_ID_INTEL_IOAT_IVB1: + case PCI_DEVICE_ID_INTEL_IOAT_IVB2: + case PCI_DEVICE_ID_INTEL_IOAT_IVB3: + case PCI_DEVICE_ID_INTEL_IOAT_IVB4: + case PCI_DEVICE_ID_INTEL_IOAT_IVB5: + case PCI_DEVICE_ID_INTEL_IOAT_IVB6: + case PCI_DEVICE_ID_INTEL_IOAT_IVB7: + case PCI_DEVICE_ID_INTEL_IOAT_IVB8: + case PCI_DEVICE_ID_INTEL_IOAT_IVB9: + return true; + default: + return false; + } + +} + +static bool is_hsw_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_HSW0: + case PCI_DEVICE_ID_INTEL_IOAT_HSW1: + case PCI_DEVICE_ID_INTEL_IOAT_HSW2: + case PCI_DEVICE_ID_INTEL_IOAT_HSW3: + case PCI_DEVICE_ID_INTEL_IOAT_HSW4: + case PCI_DEVICE_ID_INTEL_IOAT_HSW5: + case PCI_DEVICE_ID_INTEL_IOAT_HSW6: + case PCI_DEVICE_ID_INTEL_IOAT_HSW7: + case PCI_DEVICE_ID_INTEL_IOAT_HSW8: + case PCI_DEVICE_ID_INTEL_IOAT_HSW9: + return true; + default: + return false; + } + +} + +static bool is_bdx_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BDX0: + case PCI_DEVICE_ID_INTEL_IOAT_BDX1: + case PCI_DEVICE_ID_INTEL_IOAT_BDX2: + case PCI_DEVICE_ID_INTEL_IOAT_BDX3: + case PCI_DEVICE_ID_INTEL_IOAT_BDX4: + case PCI_DEVICE_ID_INTEL_IOAT_BDX5: + case PCI_DEVICE_ID_INTEL_IOAT_BDX6: + case PCI_DEVICE_ID_INTEL_IOAT_BDX7: + case PCI_DEVICE_ID_INTEL_IOAT_BDX8: + case PCI_DEVICE_ID_INTEL_IOAT_BDX9: + return true; + default: + return false; + } +} + +static bool is_xeon_cb32(struct pci_dev *pdev) +{ + return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || + is_hsw_ioat(pdev) || is_bdx_ioat(pdev); +} + +bool is_bwd_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BWD0: + case PCI_DEVICE_ID_INTEL_IOAT_BWD1: + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + /* even though not Atom, BDX-DE has same DMA silicon */ + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: + return true; + default: + return false; + } +} + +static bool is_bwd_noraid(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: + return true; + default: + return false; + } + +} + +/* + * Perform a IOAT transaction to verify the HW works. + */ +#define IOAT_TEST_SIZE 2000 + +static void ioat_dma_test_callback(void *dma_async_param) +{ + struct completion *cmp = dma_async_param; + + complete(cmp); +} + +/** + * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 
+ * @ioat_dma: dma device to be tested + */ +static int ioat_dma_self_test(struct ioatdma_device *ioat_dma) +{ + int i; + u8 *src; + u8 *dest; + struct dma_device *dma = &ioat_dma->dma_dev; + struct device *dev = &ioat_dma->pdev->dev; + struct dma_chan *dma_chan; + struct dma_async_tx_descriptor *tx; + dma_addr_t dma_dest, dma_src; + dma_cookie_t cookie; + int err = 0; + struct completion cmp; + unsigned long tmo; + unsigned long flags; + + src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + if (!src) + return -ENOMEM; + dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + if (!dest) { + kfree(src); + return -ENOMEM; + } + + /* Fill in src buffer */ + for (i = 0; i < IOAT_TEST_SIZE; i++) + src[i] = (u8)i; + + /* Start copy, using first DMA channel */ + dma_chan = container_of(dma->channels.next, struct dma_chan, + device_node); + if (dma->device_alloc_chan_resources(dma_chan) < 1) { + dev_err(dev, "selftest cannot allocate chan resource\n"); + err = -ENODEV; + goto out; + } + + dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_src)) { + dev_err(dev, "mapping src buffer failed\n"); + goto free_resources; + } + dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_dest)) { + dev_err(dev, "mapping dest buffer failed\n"); + goto unmap_src; + } + flags = DMA_PREP_INTERRUPT; + tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest, + dma_src, IOAT_TEST_SIZE, + flags); + if (!tx) { + dev_err(dev, "Self-test prep failed, disabling\n"); + err = -ENODEV; + goto unmap_dma; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test setup failed, disabling\n"); + err = -ENODEV; + goto unmap_dma; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) + != DMA_COMPLETE) { + dev_err(dev, "Self-test copy timed out, disabling\n"); + err = -ENODEV; + goto unmap_dma; + } + if (memcmp(src, dest, IOAT_TEST_SIZE)) { + dev_err(dev, "Self-test copy failed compare, disabling\n"); + err = -ENODEV; + goto free_resources; + } + +unmap_dma: + dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); +unmap_src: + dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); +free_resources: + dma->device_free_chan_resources(dma_chan); +out: + kfree(src); + kfree(dest); + return err; +} + +/** + * ioat_dma_setup_interrupts - setup interrupt handler + * @ioat_dma: ioat dma device + */ +int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma) +{ + struct ioatdma_chan *ioat_chan; + struct pci_dev *pdev = ioat_dma->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + int i, j, msixcnt; + int err = -EINVAL; + u8 intrctrl = 0; + + if (!strcmp(ioat_interrupt_style, "msix")) + goto msix; + if (!strcmp(ioat_interrupt_style, "msi")) + goto msi; + if (!strcmp(ioat_interrupt_style, "intx")) + goto intx; + dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); + goto err_no_irq; + +msix: + /* The number of MSI-X vectors should equal the number of channels */ + msixcnt = ioat_dma->dma_dev.chancnt; + for (i = 0; i < msixcnt; i++) + ioat_dma->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt); + if (err) + goto msi; + + for (i = 0; i < msixcnt; i++) { + msix = 
&ioat_dma->msix_entries[i]; + ioat_chan = ioat_chan_by_index(ioat_dma, i); + err = devm_request_irq(dev, msix->vector, + ioat_dma_do_interrupt_msix, 0, + "ioat-msix", ioat_chan); + if (err) { + for (j = 0; j < i; j++) { + msix = &ioat_dma->msix_entries[j]; + ioat_chan = ioat_chan_by_index(ioat_dma, j); + devm_free_irq(dev, msix->vector, ioat_chan); + } + goto msi; + } + } + intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; + ioat_dma->irq_mode = IOAT_MSIX; + goto done; + +msi: + err = pci_enable_msi(pdev); + if (err) + goto intx; + + err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, + "ioat-msi", ioat_dma); + if (err) { + pci_disable_msi(pdev); + goto intx; + } + ioat_dma->irq_mode = IOAT_MSI; + goto done; + +intx: + err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, + IRQF_SHARED, "ioat-intx", ioat_dma); + if (err) + goto err_no_irq; + + ioat_dma->irq_mode = IOAT_INTX; +done: + if (is_bwd_ioat(pdev)) + ioat_intr_quirk(ioat_dma); + intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; + writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); + return 0; + +err_no_irq: + /* Disable all interrupt generation */ + writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); + ioat_dma->irq_mode = IOAT_NOIRQ; + dev_err(dev, "no usable interrupts\n"); + return err; +} + +static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma) +{ + /* Disable all interrupt generation */ + writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); +} + +static int ioat_probe(struct ioatdma_device *ioat_dma) +{ + int err = -ENODEV; + struct dma_device *dma = &ioat_dma->dma_dev; + struct pci_dev *pdev = ioat_dma->pdev; + struct device *dev = &pdev->dev; + + /* DMA coherent memory pool for DMA descriptor allocations */ + ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev, + sizeof(struct ioat_dma_descriptor), + 64, 0); + if (!ioat_dma->dma_pool) { + err = -ENOMEM; + goto err_dma_pool; + } + + ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev, + sizeof(u64), + SMP_CACHE_BYTES, + SMP_CACHE_BYTES); + + if (!ioat_dma->completion_pool) { + err = -ENOMEM; + goto err_completion_pool; + } + + ioat_enumerate_channels(ioat_dma); + + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->dev = &pdev->dev; + + if (!dma->chancnt) { + dev_err(dev, "channel enumeration error\n"); + goto err_setup_interrupts; + } + + err = ioat_dma_setup_interrupts(ioat_dma); + if (err) + goto err_setup_interrupts; + + err = ioat3_dma_self_test(ioat_dma); + if (err) + goto err_self_test; + + return 0; + +err_self_test: + ioat_disable_interrupts(ioat_dma); +err_setup_interrupts: + pci_pool_destroy(ioat_dma->completion_pool); +err_completion_pool: + pci_pool_destroy(ioat_dma->dma_pool); +err_dma_pool: + return err; +} + +static int ioat_register(struct ioatdma_device *ioat_dma) +{ + int err = dma_async_device_register(&ioat_dma->dma_dev); + + if (err) { + ioat_disable_interrupts(ioat_dma); + pci_pool_destroy(ioat_dma->completion_pool); + pci_pool_destroy(ioat_dma->dma_pool); + } + + return err; +} + +static void ioat_dma_remove(struct ioatdma_device *ioat_dma) +{ + struct dma_device *dma = &ioat_dma->dma_dev; + + ioat_disable_interrupts(ioat_dma); + + ioat_kobject_del(ioat_dma); + + dma_async_device_unregister(dma); + + pci_pool_destroy(ioat_dma->dma_pool); + pci_pool_destroy(ioat_dma->completion_pool); + + INIT_LIST_HEAD(&dma->channels); +} + +/** + * ioat_enumerate_channels - find and initialize the device's channels + * @ioat_dma: the ioat dma device to be enumerated + */ +static int ioat_enumerate_channels(struct 
ioatdma_device *ioat_dma) +{ + struct ioatdma_chan *ioat_chan; + struct device *dev = &ioat_dma->pdev->dev; + struct dma_device *dma = &ioat_dma->dma_dev; + u8 xfercap_log; + int i; + + INIT_LIST_HEAD(&dma->channels); + dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET); + dma->chancnt &= 0x1f; /* bits [4:0] valid */ + if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) { + dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", + dma->chancnt, ARRAY_SIZE(ioat_dma->idx)); + dma->chancnt = ARRAY_SIZE(ioat_dma->idx); + } + xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); + xfercap_log &= 0x1f; /* bits [4:0] valid */ + if (xfercap_log == 0) + return 0; + dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); + + for (i = 0; i < dma->chancnt; i++) { + ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); + if (!ioat_chan) + break; + + ioat_init_channel(ioat_dma, ioat_chan, i); + ioat_chan->xfercap_log = xfercap_log; + spin_lock_init(&ioat_chan->prep_lock); + if (ioat_reset_hw(ioat_chan)) { + i = 0; + break; + } + } + dma->chancnt = i; + return i; +} + +/** + * ioat_free_chan_resources - release all the descriptors + * @chan: the channel to be cleaned + */ +static void ioat_free_chan_resources(struct dma_chan *c) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; + const int total_descs = 1 << ioat_chan->alloc_order; + int descs; + int i; + + /* Before freeing channel resources first check + * if they have been previously allocated for this channel. + */ + if (!ioat_chan->ring) + return; + + ioat_stop(ioat_chan); + ioat_reset_hw(ioat_chan); + + spin_lock_bh(&ioat_chan->cleanup_lock); + spin_lock_bh(&ioat_chan->prep_lock); + descs = ioat_ring_space(ioat_chan); + dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs); + for (i = 0; i < descs; i++) { + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i); + ioat_free_ring_ent(desc, c); + } + + if (descs < total_descs) + dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", + total_descs - descs); + + for (i = 0; i < total_descs - descs; i++) { + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i); + dump_desc_dbg(ioat_chan, desc); + ioat_free_ring_ent(desc, c); + } + + kfree(ioat_chan->ring); + ioat_chan->ring = NULL; + ioat_chan->alloc_order = 0; + pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion, + ioat_chan->completion_dma); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + + ioat_chan->last_completion = 0; + ioat_chan->completion_dma = 0; + ioat_chan->dmacount = 0; +} + +/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring + * @chan: channel to be initialized + */ +static int ioat_alloc_chan_resources(struct dma_chan *c) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioat_ring_ent **ring; + u64 status; + int order; + int i = 0; + u32 chanerr; + + /* have we already been set up? 
*/ + if (ioat_chan->ring) + return 1 << ioat_chan->alloc_order; + + /* Setup register to interrupt and write completion status on error */ + writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); + + /* allocate a completion writeback area */ + /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ + ioat_chan->completion = + pci_pool_alloc(ioat_chan->ioat_dma->completion_pool, + GFP_KERNEL, &ioat_chan->completion_dma); + if (!ioat_chan->completion) + return -ENOMEM; + + memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion)); + writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); + writel(((u64)ioat_chan->completion_dma) >> 32, + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); + + order = ioat_get_alloc_order(); + ring = ioat_alloc_ring(c, order, GFP_KERNEL); + if (!ring) + return -ENOMEM; + + spin_lock_bh(&ioat_chan->cleanup_lock); + spin_lock_bh(&ioat_chan->prep_lock); + ioat_chan->ring = ring; + ioat_chan->head = 0; + ioat_chan->issued = 0; + ioat_chan->tail = 0; + ioat_chan->alloc_order = order; + set_bit(IOAT_RUN, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + + ioat_start_null_desc(ioat_chan); + + /* check that we got off the ground */ + do { + udelay(1); + status = ioat_chansts(ioat_chan); + } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); + + if (is_ioat_active(status) || is_ioat_idle(status)) + return 1 << ioat_chan->alloc_order; + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + + dev_WARN(to_dev(ioat_chan), + "failed to start channel chanerr: %#x\n", chanerr); + ioat_free_chan_resources(c); + return -EFAULT; +} + +/* common channel initialization */ +static void +ioat_init_channel(struct ioatdma_device *ioat_dma, + struct ioatdma_chan *ioat_chan, int idx) +{ + struct dma_device *dma = &ioat_dma->dma_dev; + struct dma_chan *c = &ioat_chan->dma_chan; + unsigned long data = (unsigned long) c; + + ioat_chan->ioat_dma = ioat_dma; + ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1)); + spin_lock_init(&ioat_chan->cleanup_lock); + ioat_chan->dma_chan.device = dma; + dma_cookie_init(&ioat_chan->dma_chan); + list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); + ioat_dma->idx[idx] = ioat_chan; + init_timer(&ioat_chan->timer); + ioat_chan->timer.function = ioat_timer_event; + ioat_chan->timer.data = data; + tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data); +} + +#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ +static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) +{ + int i, src_idx; + struct page *dest; + struct page *xor_srcs[IOAT_NUM_SRC_TEST]; + struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; + dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; + dma_addr_t dest_dma; + struct dma_async_tx_descriptor *tx; + struct dma_chan *dma_chan; + dma_cookie_t cookie; + u8 cmp_byte = 0; + u32 cmp_word; + u32 xor_val_result; + int err = 0; + struct completion cmp; + unsigned long tmo; + struct device *dev = &ioat_dma->pdev->dev; + struct dma_device *dma = &ioat_dma->dma_dev; + u8 op = 0; + + dev_dbg(dev, "%s\n", __func__); + + if (!dma_has_cap(DMA_XOR, dma->cap_mask)) + return 0; + + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { + xor_srcs[src_idx] = alloc_page(GFP_KERNEL); + if (!xor_srcs[src_idx]) { + while (src_idx--) + __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + } + + dest = alloc_page(GFP_KERNEL); + if (!dest) { + while (src_idx--) 
+ __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + + /* Fill in src buffers */ + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { + u8 *ptr = page_address(xor_srcs[src_idx]); + + for (i = 0; i < PAGE_SIZE; i++) + ptr[i] = (1 << src_idx); + } + + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) + cmp_byte ^= (u8) (1 << src_idx); + + cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | + (cmp_byte << 8) | cmp_byte; + + memset(page_address(dest), 0, PAGE_SIZE); + + dma_chan = container_of(dma->channels.next, struct dma_chan, + device_node); + if (dma->device_alloc_chan_resources(dma_chan) < 1) { + err = -ENODEV; + goto out; + } + + /* test xor */ + op = IOAT_OP_XOR; + + dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dest_dma)) + goto dma_unmap; + + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { + dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } + tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, + IOAT_NUM_SRC_TEST, PAGE_SIZE, + DMA_PREP_INTERRUPT); + + if (!tx) { + dev_err(dev, "Self-test xor prep failed\n"); + err = -ENODEV; + goto dma_unmap; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test xor setup failed\n"); + err = -ENODEV; + goto dma_unmap; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + dev_err(dev, "Self-test xor timed out\n"); + err = -ENODEV; + goto dma_unmap; + } + + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + + dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { + u32 *ptr = page_address(dest); + + if (ptr[i] != cmp_word) { + dev_err(dev, "Self-test xor failed compare\n"); + err = -ENODEV; + goto free_resources; + } + } + dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + + dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + + /* skip validate if the capability is not present */ + if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) + goto free_resources; + + op = IOAT_OP_XOR_VAL; + + /* validate the sources with the destintation page */ + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + xor_val_srcs[i] = xor_srcs[i]; + xor_val_srcs[i] = dest; + + xor_val_result = 1; + + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { + dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } + tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, + IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, + &xor_val_result, DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(dev, "Self-test zero prep failed\n"); + err = -ENODEV; + goto dma_unmap; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test zero setup failed\n"); + err = -ENODEV; + goto dma_unmap; + } + dma->device_issue_pending(dma_chan); + + tmo = 
wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + dev_err(dev, "Self-test validate timed out\n"); + err = -ENODEV; + goto dma_unmap; + } + + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + + if (xor_val_result != 0) { + dev_err(dev, "Self-test validate failed compare\n"); + err = -ENODEV; + goto free_resources; + } + + memset(page_address(dest), 0, PAGE_SIZE); + + /* test for non-zero parity sum */ + op = IOAT_OP_XOR_VAL; + + xor_val_result = 0; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_srcs[i] = DMA_ERROR_CODE; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { + dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_srcs[i])) + goto dma_unmap; + } + tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, + IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, + &xor_val_result, DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(dev, "Self-test 2nd zero prep failed\n"); + err = -ENODEV; + goto dma_unmap; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test 2nd zero setup failed\n"); + err = -ENODEV; + goto dma_unmap; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + dev_err(dev, "Self-test 2nd validate timed out\n"); + err = -ENODEV; + goto dma_unmap; + } + + if (xor_val_result != SUM_CHECK_P_RESULT) { + dev_err(dev, "Self-test validate failed compare\n"); + err = -ENODEV; + goto dma_unmap; + } + + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); + + goto free_resources; +dma_unmap: + if (op == IOAT_OP_XOR) { + if (dest_dma != DMA_ERROR_CODE) + dma_unmap_page(dev, dest_dma, PAGE_SIZE, + DMA_FROM_DEVICE); + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + if (dma_srcs[i] != DMA_ERROR_CODE) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); + } else if (op == IOAT_OP_XOR_VAL) { + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + if (dma_srcs[i] != DMA_ERROR_CODE) + dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, + DMA_TO_DEVICE); + } +free_resources: + dma->device_free_chan_resources(dma_chan); +out: + src_idx = IOAT_NUM_SRC_TEST; + while (src_idx--) + __free_page(xor_srcs[src_idx]); + __free_page(dest); + return err; +} + +static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma) +{ + int rc; + + rc = ioat_dma_self_test(ioat_dma); + if (rc) + return rc; + + rc = ioat_xor_val_self_test(ioat_dma); + + return rc; +} + +static void ioat_intr_quirk(struct ioatdma_device *ioat_dma) +{ + struct dma_device *dma; + struct dma_chan *c; + struct ioatdma_chan *ioat_chan; + u32 errmask; + + dma = &ioat_dma->dma_dev; + + /* + * if we have descriptor write back error status, we mask the + * error interrupts + */ + if (ioat_dma->cap & IOAT_CAP_DWBES) { + list_for_each_entry(c, &dma->channels, device_node) { + ioat_chan = to_ioat_chan(c); + errmask = readl(ioat_chan->reg_base + + IOAT_CHANERR_MASK_OFFSET); + errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | + IOAT_CHANERR_XOR_Q_ERR; + writel(errmask, ioat_chan->reg_base + + IOAT_CHANERR_MASK_OFFSET); + } + } +} + +static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) +{ + struct pci_dev *pdev = 
ioat_dma->pdev; + int dca_en = system_has_dca_enabled(pdev); + struct dma_device *dma; + struct dma_chan *c; + struct ioatdma_chan *ioat_chan; + bool is_raid_device = false; + int err; + + dma = &ioat_dma->dma_dev; + dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; + dma->device_issue_pending = ioat_issue_pending; + dma->device_alloc_chan_resources = ioat_alloc_chan_resources; + dma->device_free_chan_resources = ioat_free_chan_resources; + + dma_cap_set(DMA_INTERRUPT, dma->cap_mask); + dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock; + + ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET); + + if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) + ioat_dma->cap &= + ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); + + /* dca is incompatible with raid operations */ + if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) + ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); + + if (ioat_dma->cap & IOAT_CAP_XOR) { + is_raid_device = true; + dma->max_xor = 8; + + dma_cap_set(DMA_XOR, dma->cap_mask); + dma->device_prep_dma_xor = ioat_prep_xor; + + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + dma->device_prep_dma_xor_val = ioat_prep_xor_val; + } + + if (ioat_dma->cap & IOAT_CAP_PQ) { + is_raid_device = true; + + dma->device_prep_dma_pq = ioat_prep_pq; + dma->device_prep_dma_pq_val = ioat_prep_pq_val; + dma_cap_set(DMA_PQ, dma->cap_mask); + dma_cap_set(DMA_PQ_VAL, dma->cap_mask); + + if (ioat_dma->cap & IOAT_CAP_RAID16SS) + dma_set_maxpq(dma, 16, 0); + else + dma_set_maxpq(dma, 8, 0); + + if (!(ioat_dma->cap & IOAT_CAP_XOR)) { + dma->device_prep_dma_xor = ioat_prep_pqxor; + dma->device_prep_dma_xor_val = ioat_prep_pqxor_val; + dma_cap_set(DMA_XOR, dma->cap_mask); + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + + if (ioat_dma->cap & IOAT_CAP_RAID16SS) + dma->max_xor = 16; + else + dma->max_xor = 8; + } + } + + dma->device_tx_status = ioat_tx_status; + + /* starting with CB3.3 super extended descriptors are supported */ + if (ioat_dma->cap & IOAT_CAP_RAID16SS) { + char pool_name[14]; + int i; + + for (i = 0; i < MAX_SED_POOLS; i++) { + snprintf(pool_name, 14, "ioat_hw%d_sed", i); + + /* allocate SED DMA pool */ + ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name, + &pdev->dev, + SED_SIZE * (i + 1), 64, 0); + if (!ioat_dma->sed_hw_pool[i]) + return -ENOMEM; + + } + } + + if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ))) + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + + err = ioat_probe(ioat_dma); + if (err) + return err; + + list_for_each_entry(c, &dma->channels, device_node) { + ioat_chan = to_ioat_chan(c); + writel(IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(ioat_dma); + if (err) + return err; + + ioat_kobject_add(ioat_dma, &ioat_ktype); + + if (dca) + ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base); + + return 0; +} + +#define DRV_NAME "ioatdma" + +static struct pci_driver ioat_pci_driver = { + .name = DRV_NAME, + .id_table = ioat_pci_tbl, + .probe = ioat_pci_probe, + .remove = ioat_remove, +}; + +static struct ioatdma_device * +alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) +{ + struct device *dev = &pdev->dev; + struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + + if (!d) + return NULL; + d->pdev = pdev; + d->reg_base = iobase; + return d; +} + +static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + void __iomem * const *iomap; + struct device *dev = &pdev->dev; + struct ioatdma_device *device; + int err; + + err = pcim_enable_device(pdev); + if 
(err) + return err; + + err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); + if (err) + return err; + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + return err; + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) + return err; + + device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); + if (!device) + return -ENOMEM; + pci_set_master(pdev); + pci_set_drvdata(pdev, device); + + device->version = readb(device->reg_base + IOAT_VER_OFFSET); + if (device->version >= IOAT_VER_3_0) + err = ioat3_dma_probe(device, ioat_dca_enabled); + else + return -ENODEV; + + if (err) { + dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); + return -ENODEV; + } + + return 0; +} + +static void ioat_remove(struct pci_dev *pdev) +{ + struct ioatdma_device *device = pci_get_drvdata(pdev); + + if (!device) + return; + + dev_err(&pdev->dev, "Removing dma and dca services\n"); + if (device->dca) { + unregister_dca_provider(device->dca, &pdev->dev); + free_dca_provider(device->dca); + device->dca = NULL; + } + ioat_dma_remove(device); +} + +static int __init ioat_init_module(void) +{ + int err = -ENOMEM; + + pr_info("%s: Intel(R) QuickData Technology Driver %s\n", + DRV_NAME, IOAT_DMA_VERSION); + + ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!ioat_cache) + return -ENOMEM; + + ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); + if (!ioat_sed_cache) + goto err_ioat_cache; + + err = pci_register_driver(&ioat_pci_driver); + if (err) + goto err_ioat3_cache; + + return 0; + + err_ioat3_cache: + kmem_cache_destroy(ioat_sed_cache); + + err_ioat_cache: + kmem_cache_destroy(ioat_cache); + + return err; +} +module_init(ioat_init_module); + +static void __exit ioat_exit_module(void) +{ + pci_unregister_driver(&ioat_pci_driver); + kmem_cache_destroy(ioat_cache); +} +module_exit(ioat_exit_module); diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c deleted file mode 100644 index 76f0dc688..000000000 --- a/drivers/dma/ioat/pci.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Intel I/OAT DMA Linux driver - * Copyright(c) 2007 - 2009 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - */ - -/* - * This driver supports an Intel I/OAT DMA engine, which does asynchronous - * copy operations. 
- */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/interrupt.h> -#include <linux/dca.h> -#include <linux/slab.h> -#include "dma.h" -#include "dma_v2.h" -#include "registers.h" -#include "hw.h" - -MODULE_VERSION(IOAT_DMA_VERSION); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Intel Corporation"); - -static struct pci_device_id ioat_pci_tbl[] = { - /* I/OAT v1 platforms */ - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) }, - { PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, - - /* I/OAT v2 platforms */ - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, - - /* I/OAT v3 platforms */ - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, - - /* I/OAT v3.2 platforms */ - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, - - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, - - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, - - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, - - /* I/OAT 
v3.3 platforms */ - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, - - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, - - { 0, } -}; -MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); - -static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); -static void ioat_remove(struct pci_dev *pdev); - -static int ioat_dca_enabled = 1; -module_param(ioat_dca_enabled, int, 0644); -MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); - -struct kmem_cache *ioat2_cache; -struct kmem_cache *ioat3_sed_cache; - -#define DRV_NAME "ioatdma" - -static struct pci_driver ioat_pci_driver = { - .name = DRV_NAME, - .id_table = ioat_pci_tbl, - .probe = ioat_pci_probe, - .remove = ioat_remove, -}; - -static struct ioatdma_device * -alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) -{ - struct device *dev = &pdev->dev; - struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); - - if (!d) - return NULL; - d->pdev = pdev; - d->reg_base = iobase; - return d; -} - -static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - void __iomem * const *iomap; - struct device *dev = &pdev->dev; - struct ioatdma_device *device; - int err; - - err = pcim_enable_device(pdev); - if (err) - return err; - - err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); - if (err) - return err; - iomap = pcim_iomap_table(pdev); - if (!iomap) - return -ENOMEM; - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) - return err; - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) - return err; - - device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); - if (!device) - return -ENOMEM; - pci_set_master(pdev); - pci_set_drvdata(pdev, device); - - device->version = readb(device->reg_base + IOAT_VER_OFFSET); - if (device->version == IOAT_VER_1_2) - err = ioat1_dma_probe(device, ioat_dca_enabled); - else if (device->version == IOAT_VER_2_0) - err = ioat2_dma_probe(device, ioat_dca_enabled); - else if (device->version >= IOAT_VER_3_0) - err = ioat3_dma_probe(device, ioat_dca_enabled); - else - return -ENODEV; - - if (err) { - dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); - return -ENODEV; - } - - return 0; -} - -static void ioat_remove(struct pci_dev *pdev) -{ - struct ioatdma_device *device = pci_get_drvdata(pdev); - - if (!device) - return; - - dev_err(&pdev->dev, "Removing dma and dca services\n"); - if (device->dca) { - unregister_dca_provider(device->dca, &pdev->dev); - free_dca_provider(device->dca); - device->dca = NULL; - } - ioat_dma_remove(device); -} - -static int __init ioat_init_module(void) -{ - int err = -ENOMEM; - - pr_info("%s: Intel(R) QuickData Technology Driver %s\n", - DRV_NAME, IOAT_DMA_VERSION); - - ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (!ioat2_cache) - return -ENOMEM; - - ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); - if (!ioat3_sed_cache) - goto err_ioat2_cache; - - err = pci_register_driver(&ioat_pci_driver); - if (err) - goto 
err_ioat3_cache; - - return 0; - - err_ioat3_cache: - kmem_cache_destroy(ioat3_sed_cache); - - err_ioat2_cache: - kmem_cache_destroy(ioat2_cache); - - return err; -} -module_init(ioat_init_module); - -static void __exit ioat_exit_module(void) -{ - pci_unregister_driver(&ioat_pci_driver); - kmem_cache_destroy(ioat2_cache); -} -module_exit(ioat_exit_module); diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c new file mode 100644 index 000000000..ad4fb41cd --- /dev/null +++ b/drivers/dma/ioat/prep.c @@ -0,0 +1,715 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2004 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/gfp.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/prefetch.h> +#include "../dmaengine.h" +#include "registers.h" +#include "hw.h" +#include "dma.h" + +#define MAX_SCF 1024 + +/* provide a lookup table for setting the source address in the base or + * extended descriptor of an xor or pq descriptor + */ +static const u8 xor_idx_to_desc = 0xe0; +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; +static const u8 pq_idx_to_desc = 0xf8; +static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2 }; +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; +static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6 }; + +static void xor_set_src(struct ioat_raw_descriptor *descs[2], + dma_addr_t addr, u32 offset, int idx) +{ + struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; + + raw->field[xor_idx_to_field[idx]] = addr + offset; +} + +static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) +{ + struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; + + return raw->field[pq_idx_to_field[idx]]; +} + +static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) +{ + struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; + + return raw->field[pq16_idx_to_field[idx]]; +} + +static void pq_set_src(struct ioat_raw_descriptor *descs[2], + dma_addr_t addr, u32 offset, u8 coef, int idx) +{ + struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; + struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; + + raw->field[pq_idx_to_field[idx]] = addr + offset; + pq->coef[idx] = coef; +} + +static void pq16_set_src(struct ioat_raw_descriptor *desc[3], + dma_addr_t addr, u32 offset, u8 coef, unsigned idx) +{ + struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; + struct ioat_pq16a_descriptor *pq16 = + (struct ioat_pq16a_descriptor *)desc[1]; + struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; + + raw->field[pq16_idx_to_field[idx]] = addr + offset; + + if (idx < 8) + pq->coef[idx] = coef; + else + pq16->coef[idx - 8] = coef; +} + +static struct ioat_sed_ent * +ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int 
hw_pool) +{ + struct ioat_sed_ent *sed; + gfp_t flags = __GFP_ZERO | GFP_ATOMIC; + + sed = kmem_cache_alloc(ioat_sed_cache, flags); + if (!sed) + return NULL; + + sed->hw_pool = hw_pool; + sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool], + flags, &sed->dma); + if (!sed->hw) { + kmem_cache_free(ioat_sed_cache, sed); + return NULL; + } + + return sed; +} + +struct dma_async_tx_descriptor * +ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *desc; + dma_addr_t dst = dma_dest; + dma_addr_t src = dma_src; + size_t total_len = len; + int num_descs, idx, i; + + num_descs = ioat_xferlen_to_descs(ioat_chan, len); + if (likely(num_descs) && + ioat_check_space_lock(ioat_chan, num_descs) == 0) + idx = ioat_chan->head; + else + return NULL; + i = 0; + do { + size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log); + + desc = ioat_get_ring_ent(ioat_chan, idx + i); + hw = desc->hw; + + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dst; + + len -= copy; + dst += copy; + src += copy; + dump_desc_dbg(ioat_chan, desc); + } while (++i < num_descs); + + desc->txd.flags = flags; + desc->len = total_len; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + hw->ctl_f.compl_write = 1; + dump_desc_dbg(ioat_chan, desc); + /* we leave the channel locked to ensure in order submission */ + + return &desc->txd; +} + + +static struct dma_async_tx_descriptor * +__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, + dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, + size_t len, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioat_ring_ent *compl_desc; + struct ioat_ring_ent *desc; + struct ioat_ring_ent *ext; + size_t total_len = len; + struct ioat_xor_descriptor *xor; + struct ioat_xor_ext_descriptor *xor_ex = NULL; + struct ioat_dma_descriptor *hw; + int num_descs, with_ext, idx, i; + u32 offset = 0; + u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; + + BUG_ON(src_cnt < 2); + + num_descs = ioat_xferlen_to_descs(ioat_chan, len); + /* we need 2x the number of descriptors to cover greater than 5 + * sources + */ + if (src_cnt > 5) { + with_ext = 1; + num_descs *= 2; + } else + with_ext = 0; + + /* completion writes from the raid engine may pass completion + * writes from the legacy engine, so we need one extra null + * (legacy) descriptor to ensure all completion writes arrive in + * order. 
+ */ + if (likely(num_descs) && + ioat_check_space_lock(ioat_chan, num_descs+1) == 0) + idx = ioat_chan->head; + else + return NULL; + i = 0; + do { + struct ioat_raw_descriptor *descs[2]; + size_t xfer_size = min_t(size_t, + len, 1 << ioat_chan->xfercap_log); + int s; + + desc = ioat_get_ring_ent(ioat_chan, idx + i); + xor = desc->xor; + + /* save a branch by unconditionally retrieving the + * extended descriptor xor_set_src() knows to not write + * to it in the single descriptor case + */ + ext = ioat_get_ring_ent(ioat_chan, idx + i + 1); + xor_ex = ext->xor_ex; + + descs[0] = (struct ioat_raw_descriptor *) xor; + descs[1] = (struct ioat_raw_descriptor *) xor_ex; + for (s = 0; s < src_cnt; s++) + xor_set_src(descs, src[s], offset, s); + xor->size = xfer_size; + xor->dst_addr = dest + offset; + xor->ctl = 0; + xor->ctl_f.op = op; + xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); + + len -= xfer_size; + offset += xfer_size; + dump_desc_dbg(ioat_chan, desc); + } while ((i += 1 + with_ext) < num_descs); + + /* last xor descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + + /* completion descriptor carries interrupt bit */ + compl_desc = ioat_get_ring_ent(ioat_chan, idx + i); + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; + hw = compl_desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + dump_desc_dbg(ioat_chan, compl_desc); + + /* we leave the channel locked to ensure in order submission */ + return &compl_desc->txd; +} + +struct dma_async_tx_descriptor * +ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); +} + +struct dma_async_tx_descriptor * +ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags) +{ + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... so clear it here + */ + *result = 0; + + return __ioat_prep_xor_lock(chan, result, src[0], &src[1], + src_cnt - 1, len, flags); +} + +static void +dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc, + struct ioat_ring_ent *ext) +{ + struct device *dev = to_dev(ioat_chan); + struct ioat_pq_descriptor *pq = desc->pq; + struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; + struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; + int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); + int i; + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" + " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" + " src_cnt: %d)\n", + desc_id(desc), (unsigned long long) desc->txd.phys, + (unsigned long long) (pq_ex ? pq_ex->next : pq->next), + desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, + pq->ctl_f.int_en, pq->ctl_f.compl_write, + pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? 
"" : "q", + pq->ctl_f.src_cnt); + for (i = 0; i < src_cnt; i++) + dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, + (unsigned long long) pq_get_src(descs, i), pq->coef[i]); + dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); + dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); + dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); +} + +static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan, + struct ioat_ring_ent *desc) +{ + struct device *dev = to_dev(ioat_chan); + struct ioat_pq_descriptor *pq = desc->pq; + struct ioat_raw_descriptor *descs[] = { (void *)pq, + (void *)pq, + (void *)pq }; + int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); + int i; + + if (desc->sed) { + descs[1] = (void *)desc->sed->hw; + descs[2] = (void *)desc->sed->hw + 64; + } + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" + " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" + " src_cnt: %d)\n", + desc_id(desc), (unsigned long long) desc->txd.phys, + (unsigned long long) pq->next, + desc->txd.flags, pq->size, pq->ctl, + pq->ctl_f.op, pq->ctl_f.int_en, + pq->ctl_f.compl_write, + pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", + pq->ctl_f.src_cnt); + for (i = 0; i < src_cnt; i++) { + dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, + (unsigned long long) pq16_get_src(descs, i), + pq->coef[i]); + } + dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); + dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); +} + +static struct dma_async_tx_descriptor * +__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, + const dma_addr_t *dst, const dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *compl_desc; + struct ioat_ring_ent *desc; + struct ioat_ring_ent *ext; + size_t total_len = len; + struct ioat_pq_descriptor *pq; + struct ioat_pq_ext_descriptor *pq_ex = NULL; + struct ioat_dma_descriptor *hw; + u32 offset = 0; + u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; + int i, s, idx, with_ext, num_descs; + int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0; + + dev_dbg(to_dev(ioat_chan), "%s\n", __func__); + /* the engine requires at least two sources (we provide + * at least 1 implied source in the DMA_PREP_CONTINUE case) + */ + BUG_ON(src_cnt + dmaf_continue(flags) < 2); + + num_descs = ioat_xferlen_to_descs(ioat_chan, len); + /* we need 2x the number of descriptors to cover greater than 3 + * sources (we need 1 extra source in the q-only continuation + * case and 3 extra sources in the p+q continuation case. + */ + if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || + (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { + with_ext = 1; + num_descs *= 2; + } else + with_ext = 0; + + /* completion writes from the raid engine may pass completion + * writes from the legacy engine, so we need one extra null + * (legacy) descriptor to ensure all completion writes arrive in + * order. 
+ */ + if (likely(num_descs) && + ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0) + idx = ioat_chan->head; + else + return NULL; + i = 0; + do { + struct ioat_raw_descriptor *descs[2]; + size_t xfer_size = min_t(size_t, len, + 1 << ioat_chan->xfercap_log); + + desc = ioat_get_ring_ent(ioat_chan, idx + i); + pq = desc->pq; + + /* save a branch by unconditionally retrieving the + * extended descriptor pq_set_src() knows to not write + * to it in the single descriptor case + */ + ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext); + pq_ex = ext->pq_ex; + + descs[0] = (struct ioat_raw_descriptor *) pq; + descs[1] = (struct ioat_raw_descriptor *) pq_ex; + + for (s = 0; s < src_cnt; s++) + pq_set_src(descs, src[s], offset, scf[s], s); + + /* see the comment for dma_maxpq in include/linux/dmaengine.h */ + if (dmaf_p_disabled_continue(flags)) + pq_set_src(descs, dst[1], offset, 1, s++); + else if (dmaf_continue(flags)) { + pq_set_src(descs, dst[0], offset, 0, s++); + pq_set_src(descs, dst[1], offset, 1, s++); + pq_set_src(descs, dst[1], offset, 0, s++); + } + pq->size = xfer_size; + pq->p_addr = dst[0] + offset; + pq->q_addr = dst[1] + offset; + pq->ctl = 0; + pq->ctl_f.op = op; + /* we turn on descriptor write back error status */ + if (ioat_dma->cap & IOAT_CAP_DWBES) + pq->ctl_f.wb_en = result ? 1 : 0; + pq->ctl_f.src_cnt = src_cnt_to_hw(s); + pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); + pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); + + len -= xfer_size; + offset += xfer_size; + } while ((i += 1 + with_ext) < num_descs); + + /* last pq descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + dump_pq_desc_dbg(ioat_chan, desc, ext); + + if (!cb32) { + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + pq->ctl_f.compl_write = 1; + compl_desc = desc; + } else { + /* completion descriptor carries interrupt bit */ + compl_desc = ioat_get_ring_ent(ioat_chan, idx + i); + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; + hw = compl_desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + dump_desc_dbg(ioat_chan, compl_desc); + } + + + /* we leave the channel locked to ensure in order submission */ + return &compl_desc->txd; +} + +static struct dma_async_tx_descriptor * +__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, + const dma_addr_t *dst, const dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; + size_t total_len = len; + struct ioat_pq_descriptor *pq; + u32 offset = 0; + u8 op; + int i, s, idx, num_descs; + + /* this function is only called with 9-16 sources */ + op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; + + dev_dbg(to_dev(ioat_chan), "%s\n", __func__); + + num_descs = ioat_xferlen_to_descs(ioat_chan, len); + + /* + * 16 source pq is only available on cb3.3 and has no completion + * write hw bug. 
+ */ + if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0) + idx = ioat_chan->head; + else + return NULL; + + i = 0; + + do { + struct ioat_raw_descriptor *descs[4]; + size_t xfer_size = min_t(size_t, len, + 1 << ioat_chan->xfercap_log); + + desc = ioat_get_ring_ent(ioat_chan, idx + i); + pq = desc->pq; + + descs[0] = (struct ioat_raw_descriptor *) pq; + + desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3); + if (!desc->sed) { + dev_err(to_dev(ioat_chan), + "%s: no free sed entries\n", __func__); + return NULL; + } + + pq->sed_addr = desc->sed->dma; + desc->sed->parent = desc; + + descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; + descs[2] = (void *)descs[1] + 64; + + for (s = 0; s < src_cnt; s++) + pq16_set_src(descs, src[s], offset, scf[s], s); + + /* see the comment for dma_maxpq in include/linux/dmaengine.h */ + if (dmaf_p_disabled_continue(flags)) + pq16_set_src(descs, dst[1], offset, 1, s++); + else if (dmaf_continue(flags)) { + pq16_set_src(descs, dst[0], offset, 0, s++); + pq16_set_src(descs, dst[1], offset, 1, s++); + pq16_set_src(descs, dst[1], offset, 0, s++); + } + + pq->size = xfer_size; + pq->p_addr = dst[0] + offset; + pq->q_addr = dst[1] + offset; + pq->ctl = 0; + pq->ctl_f.op = op; + pq->ctl_f.src_cnt = src16_cnt_to_hw(s); + /* we turn on descriptor write back error status */ + if (ioat_dma->cap & IOAT_CAP_DWBES) + pq->ctl_f.wb_en = result ? 1 : 0; + pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); + pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); + + len -= xfer_size; + offset += xfer_size; + } while (++i < num_descs); + + /* last pq descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + + /* with cb3.3 we should be able to do completion w/o a null desc */ + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + pq->ctl_f.compl_write = 1; + + dump_pq16_desc_dbg(ioat_chan, desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + +static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) +{ + if (dmaf_p_disabled_continue(flags)) + return src_cnt + 1; + else if (dmaf_continue(flags)) + return src_cnt + 3; + else + return src_cnt; +} + +struct dma_async_tx_descriptor * +ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + unsigned long flags) +{ + /* specify valid address for disabled result */ + if (flags & DMA_PREP_PQ_DISABLE_P) + dst[0] = dst[1]; + if (flags & DMA_PREP_PQ_DISABLE_Q) + dst[1] = dst[0]; + + /* handle the single source multiply case from the raid6 + * recovery path + */ + if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { + dma_addr_t single_source[2]; + unsigned char single_source_coef[2]; + + BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); + single_source[0] = src[0]; + single_source[1] = src[0]; + single_source_coef[0] = scf[0]; + single_source_coef[1] = 0; + + return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat_prep_pq16_lock(chan, NULL, dst, single_source, + 2, single_source_coef, len, + flags) : + __ioat_prep_pq_lock(chan, NULL, dst, single_source, 2, + single_source_coef, len, flags); + + } else { + return src_cnt_flags(src_cnt, flags) > 8 ? 
+ __ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags) : + __ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags); + } +} + +struct dma_async_tx_descriptor * +ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags) +{ + /* specify valid address for disabled result */ + if (flags & DMA_PREP_PQ_DISABLE_P) + pq[0] = pq[1]; + if (flags & DMA_PREP_PQ_DISABLE_Q) + pq[1] = pq[0]; + + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... so clear it here + */ + *pqres = 0; + + return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags) : + __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags); +} + +struct dma_async_tx_descriptor * +ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + unsigned char scf[MAX_SCF]; + dma_addr_t pq[2]; + + if (src_cnt > MAX_SCF) + return NULL; + + memset(scf, 0, src_cnt); + pq[0] = dst; + flags |= DMA_PREP_PQ_DISABLE_Q; + pq[1] = dst; /* specify valid address for disabled result */ + + return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags) : + __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags); +} + +struct dma_async_tx_descriptor * +ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags) +{ + unsigned char scf[MAX_SCF]; + dma_addr_t pq[2]; + + if (src_cnt > MAX_SCF) + return NULL; + + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... so clear it here + */ + *result = 0; + + memset(scf, 0, src_cnt); + pq[0] = src[0]; + flags |= DMA_PREP_PQ_DISABLE_Q; + pq[1] = pq[0]; /* specify valid address for disabled result */ + + return src_cnt_flags(src_cnt, flags) > 8 ? + __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags) : + __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags); +} + +struct dma_async_tx_descriptor * +ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct ioat_ring_ent *desc; + struct ioat_dma_descriptor *hw; + + if (ioat_check_space_lock(ioat_chan, 1) == 0) + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); + else + return NULL; + + hw = desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = 1; + hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + hw->src_addr = 0; + hw->dst_addr = 0; + + desc->txd.flags = flags; + desc->len = 1; + + dump_desc_dbg(ioat_chan, desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c new file mode 100644 index 000000000..cb4a857ee --- /dev/null +++ b/drivers/dma/ioat/sysfs.c @@ -0,0 +1,135 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2004 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/dmaengine.h> +#include <linux/pci.h> +#include "dma.h" +#include "registers.h" +#include "hw.h" + +#include "../dmaengine.h" + +static ssize_t cap_show(struct dma_chan *c, char *page) +{ + struct dma_device *dma = c->device; + + return sprintf(page, "copy%s%s%s%s%s\n", + dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", + dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", + dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", + dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", + dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : ""); + +} +struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap); + +static ssize_t version_show(struct dma_chan *c, char *page) +{ + struct dma_device *dma = c->device; + struct ioatdma_device *ioat_dma = to_ioatdma_device(dma); + + return sprintf(page, "%d.%d\n", + ioat_dma->version >> 4, ioat_dma->version & 0xf); +} +struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version); + +static ssize_t +ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct ioat_sysfs_entry *entry; + struct ioatdma_chan *ioat_chan; + + entry = container_of(attr, struct ioat_sysfs_entry, attr); + ioat_chan = container_of(kobj, struct ioatdma_chan, kobj); + + if (!entry->show) + return -EIO; + return entry->show(&ioat_chan->dma_chan, page); +} + +const struct sysfs_ops ioat_sysfs_ops = { + .show = ioat_attr_show, +}; + +void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type) +{ + struct dma_device *dma = &ioat_dma->dma_dev; + struct dma_chan *c; + + list_for_each_entry(c, &dma->channels, device_node) { + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + struct kobject *parent = &c->dev->device.kobj; + int err; + + err = kobject_init_and_add(&ioat_chan->kobj, type, + parent, "quickdata"); + if (err) { + dev_warn(to_dev(ioat_chan), + "sysfs init error (%d), continuing...\n", err); + kobject_put(&ioat_chan->kobj); + set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state); + } + } +} + +void ioat_kobject_del(struct ioatdma_device *ioat_dma) +{ + struct dma_device *dma = &ioat_dma->dma_dev; + struct dma_chan *c; + + list_for_each_entry(c, &dma->channels, device_node) { + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + + if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) { + kobject_del(&ioat_chan->kobj); + kobject_put(&ioat_chan->kobj); + } + } +} + +static ssize_t ring_size_show(struct dma_chan *c, char *page) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + + return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1); +} +static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); + +static ssize_t ring_active_show(struct dma_chan *c, char *page) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + + /* ...taken outside the lock, no need to be precise */ + return sprintf(page, "%d\n", ioat_ring_active(ioat_chan)); +} +static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); + +static struct attribute *ioat_attrs[] = { + &ring_size_attr.attr, + &ring_active_attr.attr, + &ioat_cap_attr.attr, + &ioat_version_attr.attr, + NULL, +}; + +struct 
kobj_type ioat_ktype = {
+	.sysfs_ops = &ioat_sysfs_ops,
+	.default_attrs = ioat_attrs,
+};
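
Editorial note (not part of the patch): the 8- vs. 16-source split in ioat_prep_pq() above is driven by src_cnt_flags(), where RAID6 continuation flags consume extra implicit sources (one when continuing with P disabled, three for a full P+Q continuation); any effective count above eight is routed to __ioat_prep_pq16_lock() and its attached SED block. The stand-alone C sketch below only mirrors that arithmetic for illustration; PREP_CONTINUE, PREP_PQ_DISABLE_P and effective_src_cnt() are local stand-ins, not the dmaengine or driver symbols.

/*
 * Stand-alone illustration (not driver code) of how ioat_prep_pq()
 * picks between the base 8-source PQ descriptor and the extended
 * 16-source (pq16 + SED) descriptor. Flag values are local stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define PREP_CONTINUE     (1u << 0)	/* stand-in for DMA_PREP_CONTINUE */
#define PREP_PQ_DISABLE_P (1u << 1)	/* stand-in for DMA_PREP_PQ_DISABLE_P */

/* mirrors src_cnt_flags() from prep.c: continuation adds implicit sources */
static unsigned int effective_src_cnt(unsigned int src_cnt, unsigned long flags)
{
	bool cont = flags & PREP_CONTINUE;

	if (cont && (flags & PREP_PQ_DISABLE_P))
		return src_cnt + 1;	/* re-inject previous Q only */
	else if (cont)
		return src_cnt + 3;	/* re-inject previous P and Q */
	return src_cnt;
}

int main(void)
{
	struct { unsigned int srcs; unsigned long flags; } cases[] = {
		{ 8, 0 },					/* fits the base descriptor */
		{ 6, PREP_CONTINUE },				/* 6 + 3 = 9 -> pq16 path   */
		{ 8, PREP_CONTINUE | PREP_PQ_DISABLE_P },	/* 8 + 1 = 9 -> pq16 path   */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		unsigned int n = effective_src_cnt(cases[i].srcs, cases[i].flags);

		printf("src_cnt=%u flags=%#lx -> effective %u, %s path\n",
		       cases[i].srcs, cases[i].flags, n,
		       n > 8 ? "__ioat_prep_pq16_lock" : "__ioat_prep_pq_lock");
	}
	return 0;
}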